author     Vincent Abriou <vincent.abriou@st.com>           2015-07-31 05:32:13 -0400
committer  Benjamin Gaignard <benjamin.gaignard@linaro.org> 2015-08-03 08:25:01 -0400
commit     871bcdfea68560991bd650406e47a801ab9d635d (patch)
tree       8a760b230480bb3227e916b157b3b4ff628cd603
parent     bf60b29f8e811c9593dcabaa4d25e412f9e10b73 (diff)
drm/sti: code clean up
Purpose is to simplify the STI driver:
- remove layer structure
- consider video subdev as part of the compositor (like mixer subdev)
- remove useless STI_VID0 and STI_VID1 enum

Signed-off-by: Vincent Abriou <vincent.abriou@st.com>
Reviewed-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
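For orientation only (this snippet is not part of the patch), below is a minimal C sketch of the data-model change the points above describe. The *_before/*_after struct names are made up for illustration; the real definitions are the ones touched in sti_compositor.h and sti_drm_plane.h in the diff that follows.

/* Minimal sketch, assuming simplified forward declarations. */
struct sti_mixer;
struct sti_vid;
struct sti_layer;
struct sti_plane;

#define STI_MAX_MIXER	2
#define STI_MAX_VID	1
#define STI_MAX_LAYER	8

/* Before: one generic layer[] array covered the GDP, VID and cursor subdevs. */
struct sti_compositor_before {
	struct sti_mixer *mixer[STI_MAX_MIXER];
	struct sti_layer *layer[STI_MAX_LAYER];
	int nb_mixers;
	int nb_layers;
};

/* After: the video subdev is owned directly, like the mixers; GDP and cursor
 * embed a struct sti_plane and are registered as DRM planes at bind time. */
struct sti_compositor_after {
	struct sti_mixer *mixer[STI_MAX_MIXER];
	struct sti_vid *vid[STI_MAX_VID];
};

With this layout the new sti_compositor_bind() shown in the diff registers the mixer and video subdevs in a first pass, then creates the cursor/GDP planes and binds one CRTC per mixer in a second pass.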
-rw-r--r--  drivers/gpu/drm/sti/Makefile              1
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.c    135
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.h     12
-rw-r--r--  drivers/gpu/drm/sti/sti_cursor.c        108
-rw-r--r--  drivers/gpu/drm/sti/sti_cursor.h          3
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_crtc.c       34
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_plane.c     322
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_plane.h      91
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c           185
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.h             3
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c            2
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.c         201
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.h          12
-rw-r--r--  drivers/gpu/drm/sti/sti_layer.c         213
-rw-r--r--  drivers/gpu/drm/sti/sti_layer.h         131
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.c          55
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.h          16
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.c            57
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.h            18
19 files changed, 713 insertions, 886 deletions
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f0f1e4ee2d92..505b3ba287ce 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -1,5 +1,4 @@
1sticompositor-y := \ 1sticompositor-y := \
2 sti_layer.o \
3 sti_mixer.o \ 2 sti_mixer.o \
4 sti_gdp.o \ 3 sti_gdp.o \
5 sti_vid.o \ 4 sti_vid.o \
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 43215d3020fb..68c5c954ce9a 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -14,10 +14,12 @@
14#include <drm/drmP.h> 14#include <drm/drmP.h>
15 15
16#include "sti_compositor.h" 16#include "sti_compositor.h"
17#include "sti_cursor.h"
17#include "sti_drm_crtc.h" 18#include "sti_drm_crtc.h"
18#include "sti_drm_drv.h" 19#include "sti_drm_drv.h"
19#include "sti_drm_plane.h" 20#include "sti_drm_plane.h"
20#include "sti_gdp.h" 21#include "sti_gdp.h"
22#include "sti_vid.h"
21#include "sti_vtg.h" 23#include "sti_vtg.h"
22 24
23/* 25/*
@@ -31,7 +33,7 @@ struct sti_compositor_data stih407_compositor_data = {
31 {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200}, 33 {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
32 {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300}, 34 {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
33 {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400}, 35 {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
34 {STI_VID_SUBDEV, (int)STI_VID_0, 0x700}, 36 {STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700},
35 {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}, 37 {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
36 {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00}, 38 {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
37 }, 39 },
@@ -53,14 +55,31 @@ struct sti_compositor_data stih416_compositor_data = {
53 }, 55 },
54}; 56};
55 57
56static int sti_compositor_init_subdev(struct sti_compositor *compo, 58static int sti_compositor_bind(struct device *dev,
57 struct sti_compositor_subdev_descriptor *desc, 59 struct device *master,
58 unsigned int array_size) 60 void *data)
59{ 61{
60 unsigned int i, mixer_id = 0, layer_id = 0; 62 struct sti_compositor *compo = dev_get_drvdata(dev);
63 struct drm_device *drm_dev = data;
64 unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0, plane_id = 0;
65 struct sti_drm_private *dev_priv = drm_dev->dev_private;
66 struct drm_plane *cursor = NULL;
67 struct drm_plane *primary = NULL;
68 struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc;
69 unsigned int array_size = compo->data.nb_subdev;
70
71 struct sti_plane *plane;
61 72
73 dev_priv->compo = compo;
74
75 /* Register mixer subdev and video subdev first */
62 for (i = 0; i < array_size; i++) { 76 for (i = 0; i < array_size; i++) {
63 switch (desc[i].type) { 77 switch (desc[i].type) {
78 case STI_VID_SUBDEV:
79 compo->vid[vid_id++] =
80 sti_vid_create(compo->dev, desc[i].id,
81 compo->regs + desc[i].offset);
82 break;
64 case STI_MIXER_MAIN_SUBDEV: 83 case STI_MIXER_MAIN_SUBDEV:
65 case STI_MIXER_AUX_SUBDEV: 84 case STI_MIXER_AUX_SUBDEV:
66 compo->mixer[mixer_id++] = 85 compo->mixer[mixer_id++] =
@@ -68,81 +87,72 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo,
68 compo->regs + desc[i].offset); 87 compo->regs + desc[i].offset);
69 break; 88 break;
70 case STI_GPD_SUBDEV: 89 case STI_GPD_SUBDEV:
71 case STI_VID_SUBDEV:
72 case STI_CURSOR_SUBDEV: 90 case STI_CURSOR_SUBDEV:
73 compo->layer[layer_id++] = 91 /* Nothing to do, wait for the second round */
74 sti_layer_create(compo->dev, desc[i].id,
75 compo->regs + desc[i].offset);
76 break; 92 break;
77 default: 93 default:
78 DRM_ERROR("Unknow subdev compoment type\n"); 94 DRM_ERROR("Unknow subdev compoment type\n");
79 return 1; 95 return 1;
80 } 96 }
81
82 } 97 }
83 compo->nb_mixers = mixer_id;
84 compo->nb_layers = layer_id;
85
86 return 0;
87}
88 98
89static int sti_compositor_bind(struct device *dev, struct device *master, 99 /* Register the other subdevs, create crtc and planes */
90 void *data) 100 for (i = 0; i < array_size; i++) {
91{ 101 enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
92 struct sti_compositor *compo = dev_get_drvdata(dev);
93 struct drm_device *drm_dev = data;
94 unsigned int i, crtc = 0, plane = 0;
95 struct sti_drm_private *dev_priv = drm_dev->dev_private;
96 struct drm_plane *cursor = NULL;
97 struct drm_plane *primary = NULL;
98
99 dev_priv->compo = compo;
100
101 for (i = 0; i < compo->nb_layers; i++) {
102 if (compo->layer[i]) {
103 enum sti_layer_desc desc = compo->layer[i]->desc;
104 enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
105 enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
106 102
107 if (crtc < compo->nb_mixers) 103 if (crtc_id < mixer_id)
108 plane_type = DRM_PLANE_TYPE_PRIMARY; 104 plane_type = DRM_PLANE_TYPE_PRIMARY;
109 105
110 switch (type) { 106 switch (desc[i].type) {
111 case STI_CUR: 107 case STI_MIXER_MAIN_SUBDEV:
112 cursor = sti_drm_plane_init(drm_dev, 108 case STI_MIXER_AUX_SUBDEV:
113 compo->layer[i], 109 case STI_VID_SUBDEV:
114 1, DRM_PLANE_TYPE_CURSOR); 110 /* Nothing to do, already done at the first round */
115 break; 111 break;
116 case STI_GDP: 112 case STI_CURSOR_SUBDEV:
117 case STI_VID: 113 plane = sti_cursor_create(compo->dev, desc[i].id,
118 primary = sti_drm_plane_init(drm_dev, 114 compo->regs + desc[i].offset);
119 compo->layer[i], 115 if (!plane) {
120 (1 << compo->nb_mixers) - 1, 116 DRM_ERROR("Can't create CURSOR plane\n");
121 plane_type);
122 plane++;
123 break; 117 break;
124 case STI_BCK: 118 }
125 case STI_VDP: 119 cursor = sti_drm_plane_init(drm_dev, plane, 1,
120 DRM_PLANE_TYPE_CURSOR);
121 plane_id++;
122 break;
123 case STI_GPD_SUBDEV:
124 plane = sti_gdp_create(compo->dev, desc[i].id,
125 compo->regs + desc[i].offset);
126 if (!plane) {
127 DRM_ERROR("Can't create GDP plane\n");
126 break; 128 break;
127 } 129 }
130 primary = sti_drm_plane_init(drm_dev, plane,
131 (1 << mixer_id) - 1,
132 plane_type);
133 plane_id++;
134 break;
135 default:
136 DRM_ERROR("Unknown subdev compoment type\n");
137 return 1;
138 }
128 139
129 /* The first planes are reserved for primary planes*/ 140 /* The first planes are reserved for primary planes*/
130 if (crtc < compo->nb_mixers && primary) { 141 if (crtc_id < mixer_id && primary) {
131 sti_drm_crtc_init(drm_dev, compo->mixer[crtc], 142 sti_drm_crtc_init(drm_dev, compo->mixer[crtc_id],
132 primary, cursor); 143 primary, cursor);
133 crtc++; 144 crtc_id++;
134 cursor = NULL; 145 cursor = NULL;
135 primary = NULL; 146 primary = NULL;
136 }
137 } 147 }
138 } 148 }
139 149
140 drm_vblank_init(drm_dev, crtc); 150 drm_vblank_init(drm_dev, crtc_id);
141 /* Allow usage of vblank without having to call drm_irq_install */ 151 /* Allow usage of vblank without having to call drm_irq_install */
142 drm_dev->irq_enabled = 1; 152 drm_dev->irq_enabled = 1;
143 153
144 DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n", 154 DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
145 crtc, plane); 155 crtc_id, plane_id);
146 DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n"); 156 DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
147 157
148 return 0; 158 return 0;
@@ -179,7 +189,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
179 struct device_node *vtg_np; 189 struct device_node *vtg_np;
180 struct sti_compositor *compo; 190 struct sti_compositor *compo;
181 struct resource *res; 191 struct resource *res;
182 int err;
183 192
184 compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL); 193 compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
185 if (!compo) { 194 if (!compo) {
@@ -251,12 +260,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
251 if (vtg_np) 260 if (vtg_np)
252 compo->vtg_aux = of_vtg_find(vtg_np); 261 compo->vtg_aux = of_vtg_find(vtg_np);
253 262
254 /* Initialize compositor subdevices */
255 err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
256 compo->data.nb_subdev);
257 if (err)
258 return err;
259
260 platform_set_drvdata(pdev, compo); 263 platform_set_drvdata(pdev, compo);
261 264
262 return component_add(&pdev->dev, &sti_compositor_ops); 265 return component_add(&pdev->dev, &sti_compositor_ops);
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 019eb44c62cc..77f99780313a 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -12,13 +12,13 @@
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14 14
15#include "sti_layer.h" 15#include "sti_drm_plane.h"
16#include "sti_mixer.h" 16#include "sti_mixer.h"
17 17
18#define WAIT_NEXT_VSYNC_MS 50 /*ms*/ 18#define WAIT_NEXT_VSYNC_MS 50 /*ms*/
19 19
20#define STI_MAX_LAYER 8
21#define STI_MAX_MIXER 2 20#define STI_MAX_MIXER 2
21#define STI_MAX_VID 1
22 22
23enum sti_compositor_subdev_type { 23enum sti_compositor_subdev_type {
24 STI_MIXER_MAIN_SUBDEV, 24 STI_MIXER_MAIN_SUBDEV,
@@ -59,11 +59,9 @@ struct sti_compositor_data {
59 * @rst_main: reset control of the main path 59 * @rst_main: reset control of the main path
60 * @rst_aux: reset control of the aux path 60 * @rst_aux: reset control of the aux path
61 * @mixer: array of mixers 61 * @mixer: array of mixers
62 * @vid: array of vids
62 * @vtg_main: vtg for main data path 63 * @vtg_main: vtg for main data path
63 * @vtg_aux: vtg for auxillary data path 64 * @vtg_aux: vtg for auxillary data path
64 * @layer: array of layers
65 * @nb_mixers: number of mixers for this compositor
66 * @nb_layers: number of layers (GDP,VID,...) for this compositor
67 * @vtg_vblank_nb: callback for VTG VSYNC notification 65 * @vtg_vblank_nb: callback for VTG VSYNC notification
68 */ 66 */
69struct sti_compositor { 67struct sti_compositor {
@@ -77,11 +75,9 @@ struct sti_compositor {
77 struct reset_control *rst_main; 75 struct reset_control *rst_main;
78 struct reset_control *rst_aux; 76 struct reset_control *rst_aux;
79 struct sti_mixer *mixer[STI_MAX_MIXER]; 77 struct sti_mixer *mixer[STI_MAX_MIXER];
78 struct sti_vid *vid[STI_MAX_VID];
80 struct sti_vtg *vtg_main; 79 struct sti_vtg *vtg_main;
81 struct sti_vtg *vtg_aux; 80 struct sti_vtg *vtg_aux;
82 struct sti_layer *layer[STI_MAX_LAYER];
83 int nb_mixers;
84 int nb_layers;
85 struct notifier_block vtg_vblank_nb; 81 struct notifier_block vtg_vblank_nb;
86}; 82};
87 83
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 010eaee60bf7..cd12403dadcf 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -8,7 +8,7 @@
8#include <drm/drmP.h> 8#include <drm/drmP.h>
9 9
10#include "sti_cursor.h" 10#include "sti_cursor.h"
11#include "sti_layer.h" 11#include "sti_drm_plane.h"
12#include "sti_vtg.h" 12#include "sti_vtg.h"
13 13
14/* Registers */ 14/* Registers */
@@ -42,7 +42,9 @@ struct dma_pixmap {
42/** 42/**
43 * STI Cursor structure 43 * STI Cursor structure
44 * 44 *
45 * @layer: layer structure 45 * @sti_plane: sti_plane structure
46 * @dev: driver device
47 * @regs: cursor registers
46 * @width: cursor width 48 * @width: cursor width
47 * @height: cursor height 49 * @height: cursor height
48 * @clut: color look up table 50 * @clut: color look up table
@@ -50,7 +52,9 @@ struct dma_pixmap {
50 * @pixmap: pixmap dma buffer (clut8-format cursor) 52 * @pixmap: pixmap dma buffer (clut8-format cursor)
51 */ 53 */
52struct sti_cursor { 54struct sti_cursor {
53 struct sti_layer layer; 55 struct sti_plane plane;
56 struct device *dev;
57 void __iomem *regs;
54 unsigned int width; 58 unsigned int width;
55 unsigned int height; 59 unsigned int height;
56 unsigned short *clut; 60 unsigned short *clut;
@@ -62,22 +66,22 @@ static const uint32_t cursor_supported_formats[] = {
62 DRM_FORMAT_ARGB8888, 66 DRM_FORMAT_ARGB8888,
63}; 67};
64 68
65#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer) 69#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
66 70
67static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer) 71static const uint32_t *sti_cursor_get_formats(struct sti_plane *plane)
68{ 72{
69 return cursor_supported_formats; 73 return cursor_supported_formats;
70} 74}
71 75
72static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer) 76static unsigned int sti_cursor_get_nb_formats(struct sti_plane *plane)
73{ 77{
74 return ARRAY_SIZE(cursor_supported_formats); 78 return ARRAY_SIZE(cursor_supported_formats);
75} 79}
76 80
77static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer) 81static void sti_cursor_argb8888_to_clut8(struct sti_plane *plane)
78{ 82{
79 struct sti_cursor *cursor = to_sti_cursor(layer); 83 struct sti_cursor *cursor = to_sti_cursor(plane);
80 u32 *src = layer->vaddr; 84 u32 *src = plane->vaddr;
81 u8 *dst = cursor->pixmap.base; 85 u8 *dst = cursor->pixmap.base;
82 unsigned int i, j; 86 unsigned int i, j;
83 u32 a, r, g, b; 87 u32 a, r, g, b;
@@ -96,42 +100,42 @@ static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
96 } 100 }
97} 101}
98 102
99static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare) 103static int sti_cursor_prepare_plane(struct sti_plane *plane, bool first_prepare)
100{ 104{
101 struct sti_cursor *cursor = to_sti_cursor(layer); 105 struct sti_cursor *cursor = to_sti_cursor(plane);
102 struct drm_display_mode *mode = layer->mode; 106 struct drm_display_mode *mode = plane->mode;
103 u32 y, x; 107 u32 y, x;
104 u32 val; 108 u32 val;
105 109
106 DRM_DEBUG_DRIVER("\n"); 110 DRM_DEBUG_DRIVER("\n");
107 111
108 dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); 112 dev_dbg(cursor->dev, "%s %s\n", __func__, sti_plane_to_str(plane));
109 113
110 if (layer->src_w < STI_CURS_MIN_SIZE || 114 if (plane->src_w < STI_CURS_MIN_SIZE ||
111 layer->src_h < STI_CURS_MIN_SIZE || 115 plane->src_h < STI_CURS_MIN_SIZE ||
112 layer->src_w > STI_CURS_MAX_SIZE || 116 plane->src_w > STI_CURS_MAX_SIZE ||
113 layer->src_h > STI_CURS_MAX_SIZE) { 117 plane->src_h > STI_CURS_MAX_SIZE) {
114 DRM_ERROR("Invalid cursor size (%dx%d)\n", 118 DRM_ERROR("Invalid cursor size (%dx%d)\n",
115 layer->src_w, layer->src_h); 119 plane->src_w, plane->src_h);
116 return -EINVAL; 120 return -EINVAL;
117 } 121 }
118 122
119 /* If the cursor size has changed, re-allocated the pixmap */ 123 /* If the cursor size has changed, re-allocated the pixmap */
120 if (!cursor->pixmap.base || 124 if (!cursor->pixmap.base ||
121 (cursor->width != layer->src_w) || 125 (cursor->width != plane->src_w) ||
122 (cursor->height != layer->src_h)) { 126 (cursor->height != plane->src_h)) {
123 cursor->width = layer->src_w; 127 cursor->width = plane->src_w;
124 cursor->height = layer->src_h; 128 cursor->height = plane->src_h;
125 129
126 if (cursor->pixmap.base) 130 if (cursor->pixmap.base)
127 dma_free_writecombine(layer->dev, 131 dma_free_writecombine(cursor->dev,
128 cursor->pixmap.size, 132 cursor->pixmap.size,
129 cursor->pixmap.base, 133 cursor->pixmap.base,
130 cursor->pixmap.paddr); 134 cursor->pixmap.paddr);
131 135
132 cursor->pixmap.size = cursor->width * cursor->height; 136 cursor->pixmap.size = cursor->width * cursor->height;
133 137
134 cursor->pixmap.base = dma_alloc_writecombine(layer->dev, 138 cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
135 cursor->pixmap.size, 139 cursor->pixmap.size,
136 &cursor->pixmap.paddr, 140 &cursor->pixmap.paddr,
137 GFP_KERNEL | GFP_DMA); 141 GFP_KERNEL | GFP_DMA);
@@ -142,55 +146,54 @@ static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare)
142 } 146 }
143 147
144 /* Convert ARGB8888 to CLUT8 */ 148 /* Convert ARGB8888 to CLUT8 */
145 sti_cursor_argb8888_to_clut8(layer); 149 sti_cursor_argb8888_to_clut8(plane);
146 150
147 /* AWS and AWE depend on the mode */ 151 /* AWS and AWE depend on the mode */
148 y = sti_vtg_get_line_number(*mode, 0); 152 y = sti_vtg_get_line_number(*mode, 0);
149 x = sti_vtg_get_pixel_number(*mode, 0); 153 x = sti_vtg_get_pixel_number(*mode, 0);
150 val = y << 16 | x; 154 val = y << 16 | x;
151 writel(val, layer->regs + CUR_AWS); 155 writel(val, cursor->regs + CUR_AWS);
152 y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1); 156 y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
153 x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1); 157 x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
154 val = y << 16 | x; 158 val = y << 16 | x;
155 writel(val, layer->regs + CUR_AWE); 159 writel(val, cursor->regs + CUR_AWE);
156 160
157 if (first_prepare) { 161 if (first_prepare) {
158 /* Set and fetch CLUT */ 162 /* Set and fetch CLUT */
159 writel(cursor->clut_paddr, layer->regs + CUR_CML); 163 writel(cursor->clut_paddr, cursor->regs + CUR_CML);
160 writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL); 164 writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
161 } 165 }
162 166
163 return 0; 167 return 0;
164} 168}
165 169
166static int sti_cursor_commit_layer(struct sti_layer *layer) 170static int sti_cursor_commit_plane(struct sti_plane *plane)
167{ 171{
168 struct sti_cursor *cursor = to_sti_cursor(layer); 172 struct sti_cursor *cursor = to_sti_cursor(plane);
169 struct drm_display_mode *mode = layer->mode; 173 struct drm_display_mode *mode = plane->mode;
170 u32 ydo, xdo; 174 u32 ydo, xdo;
171 175
172 dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); 176 dev_dbg(cursor->dev, "%s %s\n", __func__, sti_plane_to_str(plane));
173 177
174 /* Set memory location, size, and position */ 178 /* Set memory location, size, and position */
175 writel(cursor->pixmap.paddr, layer->regs + CUR_PML); 179 writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
176 writel(cursor->width, layer->regs + CUR_PMP); 180 writel(cursor->width, cursor->regs + CUR_PMP);
177 writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE); 181 writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
178 182
179 ydo = sti_vtg_get_line_number(*mode, layer->dst_y); 183 ydo = sti_vtg_get_line_number(*mode, plane->dst_y);
180 xdo = sti_vtg_get_pixel_number(*mode, layer->dst_y); 184 xdo = sti_vtg_get_pixel_number(*mode, plane->dst_y);
181 writel((ydo << 16) | xdo, layer->regs + CUR_VPO); 185 writel((ydo << 16) | xdo, cursor->regs + CUR_VPO);
182 186
183 return 0; 187 return 0;
184} 188}
185 189
186static int sti_cursor_disable_layer(struct sti_layer *layer) 190static int sti_cursor_disable_plane(struct sti_plane *plane)
187{ 191{
188 return 0; 192 return 0;
189} 193}
190 194
191static void sti_cursor_init(struct sti_layer *layer) 195static void sti_cursor_init(struct sti_cursor *cursor)
192{ 196{
193 struct sti_cursor *cursor = to_sti_cursor(layer);
194 unsigned short *base = cursor->clut; 197 unsigned short *base = cursor->clut;
195 unsigned int a, r, g, b; 198 unsigned int a, r, g, b;
196 199
@@ -205,16 +208,16 @@ static void sti_cursor_init(struct sti_layer *layer)
205 (b * 5); 208 (b * 5);
206} 209}
207 210
208static const struct sti_layer_funcs cursor_ops = { 211static const struct sti_plane_funcs cursor_plane_ops = {
209 .get_formats = sti_cursor_get_formats, 212 .get_formats = sti_cursor_get_formats,
210 .get_nb_formats = sti_cursor_get_nb_formats, 213 .get_nb_formats = sti_cursor_get_nb_formats,
211 .init = sti_cursor_init, 214 .prepare = sti_cursor_prepare_plane,
212 .prepare = sti_cursor_prepare_layer, 215 .commit = sti_cursor_commit_plane,
213 .commit = sti_cursor_commit_layer, 216 .disable = sti_cursor_disable_plane,
214 .disable = sti_cursor_disable_layer,
215}; 217};
216 218
217struct sti_layer *sti_cursor_create(struct device *dev) 219struct sti_plane *sti_cursor_create(struct device *dev, int desc,
220 void __iomem *baseaddr)
218{ 221{
219 struct sti_cursor *cursor; 222 struct sti_cursor *cursor;
220 223
@@ -236,7 +239,12 @@ struct sti_layer *sti_cursor_create(struct device *dev)
236 return NULL; 239 return NULL;
237 } 240 }
238 241
239 cursor->layer.ops = &cursor_ops; 242 cursor->dev = dev;
243 cursor->regs = baseaddr;
244 cursor->plane.desc = desc;
245 cursor->plane.ops = &cursor_plane_ops;
240 246
241 return (struct sti_layer *)cursor; 247 sti_cursor_init(cursor);
248
249 return &cursor->plane;
242} 250}
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
index 3c9827404f27..db973b705d92 100644
--- a/drivers/gpu/drm/sti/sti_cursor.h
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -7,6 +7,7 @@
7#ifndef _STI_CURSOR_H_ 7#ifndef _STI_CURSOR_H_
8#define _STI_CURSOR_H_ 8#define _STI_CURSOR_H_
9 9
10struct sti_layer *sti_cursor_create(struct device *dev); 10struct sti_plane *sti_cursor_create(struct device *dev, int desc,
11 void __iomem *baseaddr);
11 12
12#endif 13#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
index 6b641c5a2ec7..a489b04a9abe 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -41,7 +41,7 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
41 DRM_INFO("Failed to prepare/enable compo_aux clk\n"); 41 DRM_INFO("Failed to prepare/enable compo_aux clk\n");
42 } 42 }
43 43
44 sti_mixer_clear_all_layers(mixer); 44 sti_mixer_clear_all_planes(mixer);
45} 45}
46 46
47static void sti_drm_crtc_commit(struct drm_crtc *crtc) 47static void sti_drm_crtc_commit(struct drm_crtc *crtc)
@@ -49,23 +49,21 @@ static void sti_drm_crtc_commit(struct drm_crtc *crtc)
49 struct sti_mixer *mixer = to_sti_mixer(crtc); 49 struct sti_mixer *mixer = to_sti_mixer(crtc);
50 struct device *dev = mixer->dev; 50 struct device *dev = mixer->dev;
51 struct sti_compositor *compo = dev_get_drvdata(dev); 51 struct sti_compositor *compo = dev_get_drvdata(dev);
52 struct sti_layer *layer; 52 struct sti_plane *plane;
53 53
54 if ((!mixer || !compo)) { 54 if ((!mixer || !compo)) {
55 DRM_ERROR("Can not find mixer or compositor)\n"); 55 DRM_ERROR("Can't find mixer or compositor)\n");
56 return; 56 return;
57 } 57 }
58 58
59 /* get GDP which is reserved to the CRTC FB */ 59 /* get GDP which is reserved to the CRTC FB */
60 layer = to_sti_layer(crtc->primary); 60 plane = to_sti_plane(crtc->primary);
61 if (layer) 61 if (!plane)
62 sti_layer_commit(layer); 62 DRM_ERROR("Can't find CRTC dedicated plane (GDP0)\n");
63 else
64 DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
65 63
66 /* Enable layer on mixer */ 64 /* Enable plane on mixer */
67 if (sti_mixer_set_layer_status(mixer, layer, true)) 65 if (sti_mixer_set_plane_status(mixer, plane, true))
68 DRM_ERROR("Can not enable layer at mixer\n"); 66 DRM_ERROR("Cannot enable plane at mixer\n");
69 67
70 drm_crtc_vblank_on(crtc); 68 drm_crtc_vblank_on(crtc);
71} 69}
@@ -122,7 +120,7 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
122 120
123 res = sti_mixer_active_video_area(mixer, &crtc->mode); 121 res = sti_mixer_active_video_area(mixer, &crtc->mode);
124 if (res) { 122 if (res) {
125 DRM_ERROR("Can not set active video area\n"); 123 DRM_ERROR("Can't set active video area\n");
126 return -EINVAL; 124 return -EINVAL;
127 } 125 }
128 126
@@ -164,7 +162,7 @@ sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
164 sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode); 162 sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
165} 163}
166 164
167static void sti_drm_atomic_begin(struct drm_crtc *crtc) 165static void sti_drm_crtc_atomic_begin(struct drm_crtc *crtc)
168{ 166{
169 struct sti_mixer *mixer = to_sti_mixer(crtc); 167 struct sti_mixer *mixer = to_sti_mixer(crtc);
170 168
@@ -178,7 +176,7 @@ static void sti_drm_atomic_begin(struct drm_crtc *crtc)
178 } 176 }
179} 177}
180 178
181static void sti_drm_atomic_flush(struct drm_crtc *crtc) 179static void sti_drm_crtc_atomic_flush(struct drm_crtc *crtc)
182{ 180{
183} 181}
184 182
@@ -191,8 +189,8 @@ static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
191 .mode_set_nofb = sti_drm_crtc_mode_set_nofb, 189 .mode_set_nofb = sti_drm_crtc_mode_set_nofb,
192 .mode_set_base = drm_helper_crtc_mode_set_base, 190 .mode_set_base = drm_helper_crtc_mode_set_base,
193 .disable = sti_drm_crtc_disable, 191 .disable = sti_drm_crtc_disable,
194 .atomic_begin = sti_drm_atomic_begin, 192 .atomic_begin = sti_drm_crtc_atomic_begin,
195 .atomic_flush = sti_drm_atomic_flush, 193 .atomic_flush = sti_drm_crtc_atomic_flush,
196}; 194};
197 195
198static void sti_drm_crtc_destroy(struct drm_crtc *crtc) 196static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
@@ -248,6 +246,8 @@ int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
248 struct sti_compositor *compo = dev_priv->compo; 246 struct sti_compositor *compo = dev_priv->compo;
249 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; 247 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
250 248
249 DRM_DEBUG_DRIVER("\n");
250
251 if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ? 251 if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
252 compo->vtg_main : compo->vtg_aux, 252 compo->vtg_main : compo->vtg_aux,
253 vtg_vblank_nb, crtc)) { 253 vtg_vblank_nb, crtc)) {
@@ -309,7 +309,7 @@ int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
309 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 309 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
310 &sti_crtc_funcs); 310 &sti_crtc_funcs);
311 if (res) { 311 if (res) {
312 DRM_ERROR("Can not initialze CRTC\n"); 312 DRM_ERROR("Can't initialze CRTC\n");
313 return -EINVAL; 313 return -EINVAL;
314 } 314 }
315 315
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
index 5427bc28f205..0d1672204b01 100644
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ b/drivers/gpu/drm/sti/sti_drm_plane.c
@@ -8,6 +8,8 @@
8 8
9#include <drm/drmP.h> 9#include <drm/drmP.h>
10#include <drm/drm_atomic_helper.h> 10#include <drm/drm_atomic_helper.h>
11#include <drm/drm_gem_cma_helper.h>
12#include <drm/drm_fb_cma_helper.h>
11#include <drm/drm_plane_helper.h> 13#include <drm/drm_plane_helper.h>
12 14
13#include "sti_compositor.h" 15#include "sti_compositor.h"
@@ -15,120 +17,165 @@
15#include "sti_drm_plane.h" 17#include "sti_drm_plane.h"
16#include "sti_vtg.h" 18#include "sti_vtg.h"
17 19
18/* (Background) < GDP0 < GDP1 < VID0 < VID1 < GDP2 < GDP3 < (ForeGround) */ 20/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (ForeGround) */
19enum sti_layer_desc sti_layer_default_zorder[] = { 21enum sti_plane_desc sti_plane_default_zorder[] = {
20 STI_GDP_0, 22 STI_GDP_0,
21 STI_GDP_1, 23 STI_GDP_1,
22 STI_VID_0, 24 STI_HQVDP_0,
23 STI_VID_1,
24 STI_GDP_2, 25 STI_GDP_2,
25 STI_GDP_3, 26 STI_GDP_3,
26}; 27};
27 28
28static int 29const char *sti_plane_to_str(struct sti_plane *plane)
29sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
30 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
31 unsigned int crtc_w, unsigned int crtc_h,
32 uint32_t src_x, uint32_t src_y,
33 uint32_t src_w, uint32_t src_h)
34{ 30{
35 struct sti_layer *layer = to_sti_layer(plane); 31 switch (plane->desc) {
36 struct sti_mixer *mixer = to_sti_mixer(crtc); 32 case STI_GDP_0:
33 return "GDP0";
34 case STI_GDP_1:
35 return "GDP1";
36 case STI_GDP_2:
37 return "GDP2";
38 case STI_GDP_3:
39 return "GDP3";
40 case STI_HQVDP_0:
41 return "HQVDP0";
42 case STI_CURSOR:
43 return "CURSOR";
44 default:
45 return "<UNKNOWN PLANE>";
46 }
47}
48EXPORT_SYMBOL(sti_plane_to_str);
49
50static int sti_plane_prepare(struct sti_plane *plane,
51 struct drm_crtc *crtc,
52 struct drm_framebuffer *fb,
53 struct drm_display_mode *mode, int mixer_id,
54 int dest_x, int dest_y, int dest_w, int dest_h,
55 int src_x, int src_y, int src_w, int src_h)
56{
57 struct drm_gem_cma_object *cma_obj;
58 unsigned int i;
37 int res; 59 int res;
38 60
39 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n", 61 if (!plane || !fb || !mode) {
40 crtc->base.id, sti_mixer_to_str(mixer), 62 DRM_ERROR("Null fb, plane or mode\n");
41 plane->base.id, sti_layer_to_str(layer)); 63 return 1;
42 DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y); 64 }
43 65
44 res = sti_mixer_set_layer_depth(mixer, layer); 66 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
45 if (res) { 67 if (!cma_obj) {
46 DRM_ERROR("Can not set layer depth\n"); 68 DRM_ERROR("Can't get CMA GEM object for fb\n");
47 return res; 69 return 1;
48 } 70 }
49 71
50 /* src_x are in 16.16 format. */ 72 plane->fb = fb;
51 res = sti_layer_prepare(layer, crtc, fb, 73 plane->mode = mode;
52 &crtc->mode, mixer->id, 74 plane->mixer_id = mixer_id;
53 crtc_x, crtc_y, crtc_w, crtc_h, 75 plane->dst_x = dest_x;
54 src_x >> 16, src_y >> 16, 76 plane->dst_y = dest_y;
55 src_w >> 16, src_h >> 16); 77 plane->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
56 if (res) { 78 plane->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
57 DRM_ERROR("Layer prepare failed\n"); 79 plane->src_x = src_x;
58 return res; 80 plane->src_y = src_y;
81 plane->src_w = src_w;
82 plane->src_h = src_h;
83 plane->format = fb->pixel_format;
84 plane->vaddr = cma_obj->vaddr;
85 plane->paddr = cma_obj->paddr;
86 for (i = 0; i < 4; i++) {
87 plane->pitches[i] = fb->pitches[i];
88 plane->offsets[i] = fb->offsets[i];
59 } 89 }
60 90
61 res = sti_layer_commit(layer); 91 DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
62 if (res) { 92 sti_plane_to_str(plane),
63 DRM_ERROR("Layer commit failed\n"); 93 plane->mixer_id);
64 return res; 94 DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
95 sti_plane_to_str(plane),
96 plane->dst_w, plane->dst_h, plane->dst_x, plane->dst_y,
97 plane->src_w, plane->src_h, plane->src_x,
98 plane->src_y);
99
100 DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
101 (char *)&plane->format, (unsigned long)plane->paddr);
102
103 if (!plane->ops->prepare) {
104 DRM_ERROR("Cannot prepare\n");
105 return 1;
65 } 106 }
66 107
67 res = sti_mixer_set_layer_status(mixer, layer, true); 108 res = plane->ops->prepare(plane, !plane->enabled);
68 if (res) { 109 if (res) {
69 DRM_ERROR("Can not enable layer at mixer\n"); 110 DRM_ERROR("Plane prepare failed\n");
70 return res; 111 return res;
71 } 112 }
72 113
114 plane->enabled = true;
115
73 return 0; 116 return 0;
74} 117}
75 118
76static int sti_drm_disable_plane(struct drm_plane *plane) 119static int sti_plane_commit(struct sti_plane *plane)
77{ 120{
78 struct sti_layer *layer; 121 if (!plane)
79 struct sti_mixer *mixer; 122 return 1;
80 int lay_res, mix_res;
81 123
82 if (!plane->crtc) { 124 if (!plane->ops->commit) {
83 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id); 125 DRM_ERROR("Cannot commit\n");
84 return 0; 126 return 1;
85 } 127 }
86 layer = to_sti_layer(plane);
87 mixer = to_sti_mixer(plane->crtc);
88 128
89 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n", 129 return plane->ops->commit(plane);
90 plane->crtc->base.id, sti_mixer_to_str(mixer), 130}
91 plane->base.id, sti_layer_to_str(layer));
92 131
93 /* Disable layer at mixer level */ 132static int sti_plane_disable(struct sti_plane *plane)
94 mix_res = sti_mixer_set_layer_status(mixer, layer, false); 133{
95 if (mix_res) 134 int res;
96 DRM_ERROR("Can not disable layer at mixer\n");
97 135
98 /* Wait a while to be sure that a Vsync event is received */ 136 DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(plane));
99 msleep(WAIT_NEXT_VSYNC_MS); 137 if (!plane)
138 return 1;
100 139
101 /* Then disable layer itself */ 140 if (!plane->enabled)
102 lay_res = sti_layer_disable(layer); 141 return 0;
103 if (lay_res)
104 DRM_ERROR("Layer disable failed\n");
105 142
106 if (lay_res || mix_res) 143 if (!plane->ops->disable) {
107 return -EINVAL; 144 DRM_ERROR("Cannot disable\n");
145 return 1;
146 }
147
148 res = plane->ops->disable(plane);
149 if (res) {
150 DRM_ERROR("Plane disable failed\n");
151 return res;
152 }
153
154 plane->enabled = false;
108 155
109 return 0; 156 return 0;
110} 157}
111 158
112static void sti_drm_plane_destroy(struct drm_plane *plane) 159static void sti_drm_plane_destroy(struct drm_plane *drm_plane)
113{ 160{
114 DRM_DEBUG_DRIVER("\n"); 161 DRM_DEBUG_DRIVER("\n");
115 162
116 drm_plane_helper_disable(plane); 163 drm_plane_helper_disable(drm_plane);
117 drm_plane_cleanup(plane); 164 drm_plane_cleanup(drm_plane);
118} 165}
119 166
120static int sti_drm_plane_set_property(struct drm_plane *plane, 167static int sti_drm_plane_set_property(struct drm_plane *drm_plane,
121 struct drm_property *property, 168 struct drm_property *property,
122 uint64_t val) 169 uint64_t val)
123{ 170{
124 struct drm_device *dev = plane->dev; 171 struct drm_device *dev = drm_plane->dev;
125 struct sti_drm_private *private = dev->dev_private; 172 struct sti_drm_private *private = dev->dev_private;
126 struct sti_layer *layer = to_sti_layer(plane); 173 struct sti_plane *plane = to_sti_plane(drm_plane);
127 174
128 DRM_DEBUG_DRIVER("\n"); 175 DRM_DEBUG_DRIVER("\n");
129 176
130 if (property == private->plane_zorder_property) { 177 if (property == private->plane_zorder_property) {
131 layer->zorder = val; 178 plane->zorder = val;
132 return 0; 179 return 0;
133 } 180 }
134 181
@@ -145,57 +192,105 @@ static struct drm_plane_funcs sti_drm_plane_funcs = {
145 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, 192 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
146}; 193};
147 194
148static int sti_drm_plane_prepare_fb(struct drm_plane *plane, 195static int sti_drm_plane_atomic_check(struct drm_plane *drm_plane,
149 struct drm_framebuffer *fb,
150 const struct drm_plane_state *new_state)
151{
152 return 0;
153}
154
155static void sti_drm_plane_cleanup_fb(struct drm_plane *plane,
156 struct drm_framebuffer *fb,
157 const struct drm_plane_state *old_fb)
158{
159}
160
161static int sti_drm_plane_atomic_check(struct drm_plane *plane,
162 struct drm_plane_state *state) 196 struct drm_plane_state *state)
163{ 197{
164 return 0; 198 return 0;
165} 199}
166 200
167static void sti_drm_plane_atomic_update(struct drm_plane *plane, 201static void sti_drm_plane_atomic_update(struct drm_plane *drm_plane,
168 struct drm_plane_state *oldstate) 202 struct drm_plane_state *oldstate)
169{ 203{
170 struct drm_plane_state *state = plane->state; 204 struct drm_plane_state *state = drm_plane->state;
205 struct sti_plane *plane = to_sti_plane(drm_plane);
206 struct sti_mixer *mixer = to_sti_mixer(state->crtc);
207 int res;
208
209 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
210 state->crtc->base.id, sti_mixer_to_str(mixer),
211 drm_plane->base.id, sti_plane_to_str(plane));
212 DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n",
213 state->crtc_w, state->crtc_h,
214 state->crtc_x, state->crtc_y);
215
216 res = sti_mixer_set_plane_depth(mixer, plane);
217 if (res) {
218 DRM_ERROR("Cannot set plane depth\n");
219 return;
220 }
171 221
172 sti_drm_update_plane(plane, state->crtc, state->fb, 222 /* src_x are in 16.16 format */
173 state->crtc_x, state->crtc_y, 223 res = sti_plane_prepare(plane, state->crtc, state->fb,
174 state->crtc_w, state->crtc_h, 224 &state->crtc->mode, mixer->id,
175 state->src_x, state->src_y, 225 state->crtc_x, state->crtc_y,
176 state->src_w, state->src_h); 226 state->crtc_w, state->crtc_h,
227 state->src_x >> 16, state->src_y >> 16,
228 state->src_w >> 16, state->src_h >> 16);
229 if (res) {
230 DRM_ERROR("Plane prepare failed\n");
231 return;
232 }
233
234 res = sti_plane_commit(plane);
235 if (res) {
236 DRM_ERROR("Plane commit failed\n");
237 return;
238 }
239
240 res = sti_mixer_set_plane_status(mixer, plane, true);
241 if (res) {
242 DRM_ERROR("Cannot enable plane at mixer\n");
243 return;
244 }
177} 245}
178 246
179static void sti_drm_plane_atomic_disable(struct drm_plane *plane, 247static void sti_drm_plane_atomic_disable(struct drm_plane *drm_plane,
180 struct drm_plane_state *oldstate) 248 struct drm_plane_state *oldstate)
181{ 249{
182 sti_drm_disable_plane(plane); 250 struct sti_plane *plane = to_sti_plane(drm_plane);
251 struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
252 int res;
253
254 if (!drm_plane->crtc) {
255 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
256 drm_plane->base.id);
257 return;
258 }
259
260 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
261 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
262 drm_plane->base.id, sti_plane_to_str(plane));
263
264 /* Disable plane at mixer level */
265 res = sti_mixer_set_plane_status(mixer, plane, false);
266 if (res) {
267 DRM_ERROR("Cannot disable plane at mixer\n");
268 return;
269 }
270
271 /* Wait a while to be sure that a Vsync event is received */
272 msleep(WAIT_NEXT_VSYNC_MS);
273
274 /* Then disable plane itself */
275 res = sti_plane_disable(plane);
276 if (res) {
277 DRM_ERROR("Plane disable failed\n");
278 return;
279 }
183} 280}
184 281
185static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = { 282static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = {
186 .prepare_fb = sti_drm_plane_prepare_fb,
187 .cleanup_fb = sti_drm_plane_cleanup_fb,
188 .atomic_check = sti_drm_plane_atomic_check, 283 .atomic_check = sti_drm_plane_atomic_check,
189 .atomic_update = sti_drm_plane_atomic_update, 284 .atomic_update = sti_drm_plane_atomic_update,
190 .atomic_disable = sti_drm_plane_atomic_disable, 285 .atomic_disable = sti_drm_plane_atomic_disable,
191}; 286};
192 287
193static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane) 288static void sti_drm_plane_attach_zorder_property(struct drm_plane *drm_plane)
194{ 289{
195 struct drm_device *dev = plane->dev; 290 struct drm_device *dev = drm_plane->dev;
196 struct sti_drm_private *private = dev->dev_private; 291 struct sti_drm_private *private = dev->dev_private;
292 struct sti_plane *plane = to_sti_plane(drm_plane);
197 struct drm_property *prop; 293 struct drm_property *prop;
198 struct sti_layer *layer = to_sti_layer(plane);
199 294
200 prop = private->plane_zorder_property; 295 prop = private->plane_zorder_property;
201 if (!prop) { 296 if (!prop) {
@@ -207,40 +302,43 @@ static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane)
207 private->plane_zorder_property = prop; 302 private->plane_zorder_property = prop;
208 } 303 }
209 304
210 drm_object_attach_property(&plane->base, prop, layer->zorder); 305 drm_object_attach_property(&drm_plane->base, prop, plane->zorder);
211} 306}
212 307
213struct drm_plane *sti_drm_plane_init(struct drm_device *dev, 308struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
214 struct sti_layer *layer, 309 struct sti_plane *plane,
215 unsigned int possible_crtcs, 310 unsigned int possible_crtcs,
216 enum drm_plane_type type) 311 enum drm_plane_type type)
217{ 312{
218 int err, i; 313 int err, i;
219 314
220 err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs, 315 err = drm_universal_plane_init(dev, &plane->drm_plane,
221 &sti_drm_plane_funcs, 316 possible_crtcs,
222 sti_layer_get_formats(layer), 317 &sti_drm_plane_funcs,
223 sti_layer_get_nb_formats(layer), type); 318 plane->ops->get_formats(plane),
319 plane->ops->get_nb_formats(plane),
320 type);
224 if (err) { 321 if (err) {
225 DRM_ERROR("Failed to initialize plane\n"); 322 DRM_ERROR("Failed to initialize universal plane\n");
226 return NULL; 323 return NULL;
227 } 324 }
228 325
229 drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs); 326 drm_plane_helper_add(&plane->drm_plane,
327 &sti_drm_plane_helpers_funcs);
230 328
231 for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++) 329 for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++)
232 if (sti_layer_default_zorder[i] == layer->desc) 330 if (sti_plane_default_zorder[i] == plane->desc)
233 break; 331 break;
234 332
235 layer->zorder = i + 1; 333 plane->zorder = i + 1;
236 334
237 if (type == DRM_PLANE_TYPE_OVERLAY) 335 if (type == DRM_PLANE_TYPE_OVERLAY)
238 sti_drm_plane_attach_zorder_property(&layer->plane); 336 sti_drm_plane_attach_zorder_property(&plane->drm_plane);
239 337
240 DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n", 338 DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n",
241 layer->plane.base.id, 339 plane->drm_plane.base.id,
242 sti_layer_to_str(layer), layer->zorder); 340 sti_plane_to_str(plane), plane->zorder);
243 341
244 return &layer->plane; 342 return &plane->drm_plane;
245} 343}
246EXPORT_SYMBOL(sti_drm_plane_init); 344EXPORT_SYMBOL(sti_drm_plane_init);
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
index 4f191839f2a7..e5473661c85a 100644
--- a/drivers/gpu/drm/sti/sti_drm_plane.h
+++ b/drivers/gpu/drm/sti/sti_drm_plane.h
@@ -9,10 +9,97 @@
9 9
10#include <drm/drmP.h> 10#include <drm/drmP.h>
11 11
12struct sti_layer; 12#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
13
14#define STI_PLANE_TYPE_SHIFT 8
15#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))
16
17enum sti_plane_type {
18 STI_GDP = 1 << STI_PLANE_TYPE_SHIFT,
19 STI_VDP = 2 << STI_PLANE_TYPE_SHIFT,
20 STI_CUR = 3 << STI_PLANE_TYPE_SHIFT,
21 STI_BCK = 4 << STI_PLANE_TYPE_SHIFT
22};
23
24enum sti_plane_id_of_type {
25 STI_ID_0 = 0,
26 STI_ID_1 = 1,
27 STI_ID_2 = 2,
28 STI_ID_3 = 3
29};
30
31enum sti_plane_desc {
32 STI_GDP_0 = STI_GDP | STI_ID_0,
33 STI_GDP_1 = STI_GDP | STI_ID_1,
34 STI_GDP_2 = STI_GDP | STI_ID_2,
35 STI_GDP_3 = STI_GDP | STI_ID_3,
36 STI_HQVDP_0 = STI_VDP | STI_ID_0,
37 STI_CURSOR = STI_CUR,
38 STI_BACK = STI_BCK
39};
40
41/**
42 * STI plane structure
43 *
44 * @plane: drm plane it is bound to (if any)
45 * @fb: drm fb it is bound to
46 * @mode: display mode
47 * @desc: plane type & id
48 * @ops: plane functions
49 * @zorder: plane z-order
50 * @mixer_id: id of the mixer used to display the plane
51 * @enabled: to know if the plane is active or not
52 * @src_x src_y: coordinates of the input (fb) area
53 * @src_w src_h: size of the input (fb) area
54 * @dst_x dst_y: coordinates of the output (crtc) area
55 * @dst_w dst_h: size of the output (crtc) area
56 * @format: format
57 * @pitches: pitch of 'planes' (eg: Y, U, V)
58 * @offsets: offset of 'planes'
59 * @vaddr: virtual address of the input buffer
60 * @paddr: physical address of the input buffer
61 */
62struct sti_plane {
63 struct drm_plane drm_plane;
64 struct drm_framebuffer *fb;
65 struct drm_display_mode *mode;
66 enum sti_plane_desc desc;
67 const struct sti_plane_funcs *ops;
68 int zorder;
69 int mixer_id;
70 bool enabled;
71 int src_x, src_y;
72 int src_w, src_h;
73 int dst_x, dst_y;
74 int dst_w, dst_h;
75 uint32_t format;
76 unsigned int pitches[4];
77 unsigned int offsets[4];
78 void *vaddr;
79 dma_addr_t paddr;
80};
81
82/**
83 * STI plane functions structure
84 *
85 * @get_formats: get plane supported formats
86 * @get_nb_formats: get number of format supported
87 * @prepare: prepare plane before rendering
88 * @commit: set plane for rendering
89 * @disable: disable plane
90 */
91struct sti_plane_funcs {
92 const uint32_t* (*get_formats)(struct sti_plane *plane);
93 unsigned int (*get_nb_formats)(struct sti_plane *plane);
94 int (*prepare)(struct sti_plane *plane, bool first_prepare);
95 int (*commit)(struct sti_plane *plane);
96 int (*disable)(struct sti_plane *plane);
97};
13 98
14struct drm_plane *sti_drm_plane_init(struct drm_device *dev, 99struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
15 struct sti_layer *layer, 100 struct sti_plane *sti_plane,
16 unsigned int possible_crtcs, 101 unsigned int possible_crtcs,
17 enum drm_plane_type type); 102 enum drm_plane_type type);
103const char *sti_plane_to_str(struct sti_plane *plane);
104
18#endif 105#endif
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 087906fd8846..e94d0be3c84f 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -10,8 +10,8 @@
10#include <linux/dma-mapping.h> 10#include <linux/dma-mapping.h>
11 11
12#include "sti_compositor.h" 12#include "sti_compositor.h"
13#include "sti_drm_plane.h"
13#include "sti_gdp.h" 14#include "sti_gdp.h"
14#include "sti_layer.h"
15#include "sti_vtg.h" 15#include "sti_vtg.h"
16 16
17#define ALPHASWITCH BIT(6) 17#define ALPHASWITCH BIT(6)
@@ -85,16 +85,20 @@ struct sti_gdp_node_list {
85/** 85/**
86 * STI GDP structure 86 * STI GDP structure
87 * 87 *
88 * @layer: layer structure 88 * @sti_plane: sti_plane structure
89 * @dev: driver device
90 * @regs: gdp registers
89 * @clk_pix: pixel clock for the current gdp 91 * @clk_pix: pixel clock for the current gdp
90 * @clk_main_parent: gdp parent clock if main path used 92 * @clk_main_parent: gdp parent clock if main path used
91 * @clk_aux_parent: gdp parent clock if aux path used 93 * @clk_aux_parent: gdp parent clock if aux path used
92 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification 94 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
93 * @is_curr_top: true if the current node processed is the top field 95 * @is_curr_top: true if the current node processed is the top field
94 * @node_list: array of node list 96 * @node_list: array of node list
95 */ 97 */
96struct sti_gdp { 98struct sti_gdp {
97 struct sti_layer layer; 99 struct sti_plane plane;
100 struct device *dev;
101 void __iomem *regs;
98 struct clk *clk_pix; 102 struct clk *clk_pix;
99 struct clk *clk_main_parent; 103 struct clk *clk_main_parent;
100 struct clk *clk_aux_parent; 104 struct clk *clk_aux_parent;
@@ -103,7 +107,7 @@ struct sti_gdp {
103 struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK]; 107 struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
104}; 108};
105 109
106#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer) 110#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
107 111
108static const uint32_t gdp_supported_formats[] = { 112static const uint32_t gdp_supported_formats[] = {
109 DRM_FORMAT_XRGB8888, 113 DRM_FORMAT_XRGB8888,
@@ -120,12 +124,12 @@ static const uint32_t gdp_supported_formats[] = {
120 DRM_FORMAT_C8, 124 DRM_FORMAT_C8,
121}; 125};
122 126
123static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer) 127static const uint32_t *sti_gdp_get_formats(struct sti_plane *plane)
124{ 128{
125 return gdp_supported_formats; 129 return gdp_supported_formats;
126} 130}
127 131
128static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer) 132static unsigned int sti_gdp_get_nb_formats(struct sti_plane *plane)
129{ 133{
130 return ARRAY_SIZE(gdp_supported_formats); 134 return ARRAY_SIZE(gdp_supported_formats);
131} 135}
@@ -175,20 +179,20 @@ static int sti_gdp_get_alpharange(int format)
175 179
176/** 180/**
177 * sti_gdp_get_free_nodes 181 * sti_gdp_get_free_nodes
178 * @layer: gdp layer 182 * @plane: gdp plane
179 * 183 *
180 * Look for a GDP node list that is not currently read by the HW. 184 * Look for a GDP node list that is not currently read by the HW.
181 * 185 *
182 * RETURNS: 186 * RETURNS:
183 * Pointer to the free GDP node list 187 * Pointer to the free GDP node list
184 */ 188 */
185static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) 189static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_plane *plane)
186{ 190{
187 int hw_nvn; 191 int hw_nvn;
188 struct sti_gdp *gdp = to_sti_gdp(layer); 192 struct sti_gdp *gdp = to_sti_gdp(plane);
189 unsigned int i; 193 unsigned int i;
190 194
191 hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); 195 hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
192 if (!hw_nvn) 196 if (!hw_nvn)
193 goto end; 197 goto end;
194 198
@@ -199,7 +203,7 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
199 203
200 /* in hazardious cases restart with the first node */ 204 /* in hazardious cases restart with the first node */
201 DRM_ERROR("inconsistent NVN for %s: 0x%08X\n", 205 DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
202 sti_layer_to_str(layer), hw_nvn); 206 sti_plane_to_str(plane), hw_nvn);
203 207
204end: 208end:
205 return &gdp->node_list[0]; 209 return &gdp->node_list[0];
@@ -207,7 +211,7 @@ end:
207 211
208/** 212/**
209 * sti_gdp_get_current_nodes 213 * sti_gdp_get_current_nodes
210 * @layer: GDP layer 214 * @plane: GDP plane
211 * 215 *
212 * Look for GDP nodes that are currently read by the HW. 216 * Look for GDP nodes that are currently read by the HW.
213 * 217 *
@@ -215,13 +219,13 @@ end:
215 * Pointer to the current GDP node list 219 * Pointer to the current GDP node list
216 */ 220 */
217static 221static
218struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) 222struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_plane *plane)
219{ 223{
220 int hw_nvn; 224 int hw_nvn;
221 struct sti_gdp *gdp = to_sti_gdp(layer); 225 struct sti_gdp *gdp = to_sti_gdp(plane);
222 unsigned int i; 226 unsigned int i;
223 227
224 hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); 228 hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
225 if (!hw_nvn) 229 if (!hw_nvn)
226 goto end; 230 goto end;
227 231
@@ -232,28 +236,28 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
232 236
233end: 237end:
234 DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n", 238 DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
235 hw_nvn, sti_layer_to_str(layer)); 239 hw_nvn, sti_plane_to_str(plane));
236 240
237 return NULL; 241 return NULL;
238} 242}
239 243
240/** 244/**
241 * sti_gdp_prepare_layer 245 * sti_gdp_prepare
242 * @lay: gdp layer 246 * @plane: gdp plane
243 * @first_prepare: true if it is the first time this function is called 247 * @first_prepare: true if it is the first time this function is called
244 * 248 *
245 * Update the free GDP node list according to the layer properties. 249 * Update the free GDP node list according to the plane properties.
246 * 250 *
247 * RETURNS: 251 * RETURNS:
248 * 0 on success. 252 * 0 on success.
249 */ 253 */
250static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare) 254static int sti_gdp_prepare(struct sti_plane *plane, bool first_prepare)
251{ 255{
252 struct sti_gdp_node_list *list; 256 struct sti_gdp_node_list *list;
253 struct sti_gdp_node *top_field, *btm_field; 257 struct sti_gdp_node *top_field, *btm_field;
254 struct drm_display_mode *mode = layer->mode; 258 struct drm_display_mode *mode = plane->mode;
255 struct device *dev = layer->dev; 259 struct sti_gdp *gdp = to_sti_gdp(plane);
256 struct sti_gdp *gdp = to_sti_gdp(layer); 260 struct device *dev = gdp->dev;
257 struct sti_compositor *compo = dev_get_drvdata(dev); 261 struct sti_compositor *compo = dev_get_drvdata(dev);
258 int format; 262 int format;
259 unsigned int depth, bpp; 263 unsigned int depth, bpp;
@@ -261,20 +265,20 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
261 int res; 265 int res;
262 u32 ydo, xdo, yds, xds; 266 u32 ydo, xdo, yds, xds;
263 267
264 list = sti_gdp_get_free_nodes(layer); 268 list = sti_gdp_get_free_nodes(plane);
265 top_field = list->top_field; 269 top_field = list->top_field;
266 btm_field = list->btm_field; 270 btm_field = list->btm_field;
267 271
268 dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__, 272 dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
269 sti_layer_to_str(layer), top_field, btm_field); 273 sti_plane_to_str(plane), top_field, btm_field);
270 274
271 /* Build the top field from layer params */ 275 /* Build the top field from plane params */
272 top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE; 276 top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
273 top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC; 277 top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
274 format = sti_gdp_fourcc2format(layer->format); 278 format = sti_gdp_fourcc2format(plane->format);
275 if (format == -1) { 279 if (format == -1) {
276 DRM_ERROR("Format not supported by GDP %.4s\n", 280 DRM_ERROR("Format not supported by GDP %.4s\n",
277 (char *)&layer->format); 281 (char *)&plane->format);
278 return 1; 282 return 1;
279 } 283 }
280 top_field->gam_gdp_ctl |= format; 284 top_field->gam_gdp_ctl |= format;
@@ -282,22 +286,22 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
282 top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE; 286 top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
283 287
284 /* pixel memory location */ 288 /* pixel memory location */
285 drm_fb_get_bpp_depth(layer->format, &depth, &bpp); 289 drm_fb_get_bpp_depth(plane->format, &depth, &bpp);
286 top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0]; 290 top_field->gam_gdp_pml = (u32)plane->paddr + plane->offsets[0];
287 top_field->gam_gdp_pml += layer->src_x * (bpp >> 3); 291 top_field->gam_gdp_pml += plane->src_x * (bpp >> 3);
288 top_field->gam_gdp_pml += layer->src_y * layer->pitches[0]; 292 top_field->gam_gdp_pml += plane->src_y * plane->pitches[0];
289 293
290 /* input parameters */ 294 /* input parameters */
291 top_field->gam_gdp_pmp = layer->pitches[0]; 295 top_field->gam_gdp_pmp = plane->pitches[0];
292 top_field->gam_gdp_size = 296 top_field->gam_gdp_size =
293 clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 | 297 clamp_val(plane->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
294 clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX); 298 clamp_val(plane->src_w, 0, GAM_GDP_SIZE_MAX);
295 299
296 /* output parameters */ 300 /* output parameters */
297 ydo = sti_vtg_get_line_number(*mode, layer->dst_y); 301 ydo = sti_vtg_get_line_number(*mode, plane->dst_y);
298 yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1); 302 yds = sti_vtg_get_line_number(*mode, plane->dst_y + plane->dst_h - 1);
299 xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x); 303 xdo = sti_vtg_get_pixel_number(*mode, plane->dst_x);
300 xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1); 304 xds = sti_vtg_get_pixel_number(*mode, plane->dst_x + plane->dst_w - 1);
301 top_field->gam_gdp_vpo = (ydo << 16) | xdo; 305 top_field->gam_gdp_vpo = (ydo << 16) | xdo;
302 top_field->gam_gdp_vps = (yds << 16) | xds; 306 top_field->gam_gdp_vps = (yds << 16) | xds;
303 307
@@ -307,15 +311,15 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
307 btm_field->gam_gdp_nvn = list->top_field_paddr; 311 btm_field->gam_gdp_nvn = list->top_field_paddr;
308 312
309 /* Interlaced mode */ 313 /* Interlaced mode */
310 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) 314 if (plane->mode->flags & DRM_MODE_FLAG_INTERLACE)
311 btm_field->gam_gdp_pml = top_field->gam_gdp_pml + 315 btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
312 layer->pitches[0]; 316 plane->pitches[0];
313 317
314 if (first_prepare) { 318 if (first_prepare) {
315 /* Register gdp callback */ 319 /* Register gdp callback */
316 if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ? 320 if (sti_vtg_register_client(plane->mixer_id == STI_MIXER_MAIN ?
317 compo->vtg_main : compo->vtg_aux, 321 compo->vtg_main : compo->vtg_aux,
318 &gdp->vtg_field_nb, layer->mixer_id)) { 322 &gdp->vtg_field_nb, plane->mixer_id)) {
319 DRM_ERROR("Cannot register VTG notifier\n"); 323 DRM_ERROR("Cannot register VTG notifier\n");
320 return 1; 324 return 1;
321 } 325 }
@@ -325,7 +329,7 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
325 struct clk *clkp; 329 struct clk *clkp;
326 /* According to the mixer used, the gdp pixel clock 330 /* According to the mixer used, the gdp pixel clock
327 * should have a different parent clock. */ 331 * should have a different parent clock. */
328 if (layer->mixer_id == STI_MIXER_MAIN) 332 if (plane->mixer_id == STI_MIXER_MAIN)
329 clkp = gdp->clk_main_parent; 333 clkp = gdp->clk_main_parent;
330 else 334 else
331 clkp = gdp->clk_aux_parent; 335 clkp = gdp->clk_aux_parent;
@@ -351,8 +355,8 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
351} 355}
352 356
353/** 357/**
354 * sti_gdp_commit_layer 358 * sti_gdp_commit
355 * @lay: gdp layer 359 * @plane: gdp plane
356 * 360 *
357 * Update the NVN field of the 'right' field of the current GDP node (being 361 * Update the NVN field of the 'right' field of the current GDP node (being
358 * used by the HW) with the address of the updated ('free') top field GDP node. 362 * used by the HW) with the address of the updated ('free') top field GDP node.
@@ -365,38 +369,38 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
365 * RETURNS: 369 * RETURNS:
366 * 0 on success. 370 * 0 on success.
367 */ 371 */
368static int sti_gdp_commit_layer(struct sti_layer *layer) 372static int sti_gdp_commit(struct sti_plane *plane)
369{ 373{
370 struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer); 374 struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(plane);
371 struct sti_gdp_node *updated_top_node = updated_list->top_field; 375 struct sti_gdp_node *updated_top_node = updated_list->top_field;
372 struct sti_gdp_node *updated_btm_node = updated_list->btm_field; 376 struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
373 struct sti_gdp *gdp = to_sti_gdp(layer); 377 struct sti_gdp *gdp = to_sti_gdp(plane);
374 u32 dma_updated_top = updated_list->top_field_paddr; 378 u32 dma_updated_top = updated_list->top_field_paddr;
375 u32 dma_updated_btm = updated_list->btm_field_paddr; 379 u32 dma_updated_btm = updated_list->btm_field_paddr;
376 struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer); 380 struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(plane);
377 381
378 dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__, 382 dev_dbg(gdp->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
379 sti_layer_to_str(layer), 383 sti_plane_to_str(plane),
380 updated_top_node, updated_btm_node); 384 updated_top_node, updated_btm_node);
381 dev_dbg(layer->dev, "Current NVN:0x%X\n", 385 dev_dbg(gdp->dev, "Current NVN:0x%X\n",
382 readl(layer->regs + GAM_GDP_NVN_OFFSET)); 386 readl(gdp->regs + GAM_GDP_NVN_OFFSET));
383 dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n", 387 dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
384 (unsigned long)layer->paddr, 388 (unsigned long)plane->paddr,
385 readl(layer->regs + GAM_GDP_PML_OFFSET)); 389 readl(gdp->regs + GAM_GDP_PML_OFFSET));
386 390
387 if (curr_list == NULL) { 391 if (curr_list == NULL) {
388 /* First update or invalid node should directly write in the 392 /* First update or invalid node should directly write in the
389 * hw register */ 393 * hw register */
390 DRM_DEBUG_DRIVER("%s first update (or invalid node)", 394 DRM_DEBUG_DRIVER("%s first update (or invalid node)",
391 sti_layer_to_str(layer)); 395 sti_plane_to_str(plane));
392 396
393 writel(gdp->is_curr_top == true ? 397 writel(gdp->is_curr_top == true ?
394 dma_updated_btm : dma_updated_top, 398 dma_updated_btm : dma_updated_top,
395 layer->regs + GAM_GDP_NVN_OFFSET); 399 gdp->regs + GAM_GDP_NVN_OFFSET);
396 return 0; 400 return 0;
397 } 401 }
398 402
399 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) { 403 if (plane->mode->flags & DRM_MODE_FLAG_INTERLACE) {
400 if (gdp->is_curr_top == true) { 404 if (gdp->is_curr_top == true) {
401 /* Do not update in the middle of the frame, but 405 /* Do not update in the middle of the frame, but
402 * postpone the update after the bottom field has 406 * postpone the update after the bottom field has
@@ -405,32 +409,32 @@ static int sti_gdp_commit_layer(struct sti_layer *layer)
405 } else { 409 } else {
406 /* Direct update to avoid one frame delay */ 410 /* Direct update to avoid one frame delay */
407 writel(dma_updated_top, 411 writel(dma_updated_top,
408 layer->regs + GAM_GDP_NVN_OFFSET); 412 gdp->regs + GAM_GDP_NVN_OFFSET);
409 } 413 }
410 } else { 414 } else {
411 /* Direct update for progressive to avoid one frame delay */ 415 /* Direct update for progressive to avoid one frame delay */
412 writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET); 416 writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
413 } 417 }
414 418
415 return 0; 419 return 0;
416} 420}
417 421
418/** 422/**
419 * sti_gdp_disable_layer 423 * sti_gdp_disable
420 * @lay: gdp layer 424 * @plane: gdp plane
421 * 425 *
422 * Disable a GDP. 426 * Disable a GDP.
423 * 427 *
424 * RETURNS: 428 * RETURNS:
425 * 0 on success. 429 * 0 on success.
426 */ 430 */
427static int sti_gdp_disable_layer(struct sti_layer *layer) 431static int sti_gdp_disable(struct sti_plane *plane)
428{ 432{
429 unsigned int i; 433 unsigned int i;
430 struct sti_gdp *gdp = to_sti_gdp(layer); 434 struct sti_gdp *gdp = to_sti_gdp(plane);
431 struct sti_compositor *compo = dev_get_drvdata(layer->dev); 435 struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
432 436
433 DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); 437 DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(plane));
434 438
435 /* Set the nodes as 'to be ignored on mixer' */ 439 /* Set the nodes as 'to be ignored on mixer' */
436 for (i = 0; i < GDP_NODE_NB_BANK; i++) { 440 for (i = 0; i < GDP_NODE_NB_BANK; i++) {
@@ -438,7 +442,7 @@ static int sti_gdp_disable_layer(struct sti_layer *layer)
438 gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; 442 gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
439 } 443 }
440 444
441 if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ? 445 if (sti_vtg_unregister_client(plane->mixer_id == STI_MIXER_MAIN ?
442 compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb)) 446 compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
443 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 447 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
444 448
@@ -479,10 +483,9 @@ int sti_gdp_field_cb(struct notifier_block *nb,
479 return 0; 483 return 0;
480} 484}
481 485
482static void sti_gdp_init(struct sti_layer *layer) 486static void sti_gdp_init(struct sti_gdp *gdp)
483{ 487{
484 struct sti_gdp *gdp = to_sti_gdp(layer); 488 struct device_node *np = gdp->dev->of_node;
485 struct device_node *np = layer->dev->of_node;
486 dma_addr_t dma_addr; 489 dma_addr_t dma_addr;
487 void *base; 490 void *base;
488 unsigned int i, size; 491 unsigned int i, size;
@@ -490,8 +493,8 @@ static void sti_gdp_init(struct sti_layer *layer)
490 /* Allocate all the nodes within a single memory page */ 493 /* Allocate all the nodes within a single memory page */
491 size = sizeof(struct sti_gdp_node) * 494 size = sizeof(struct sti_gdp_node) *
492 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; 495 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
493 base = dma_alloc_writecombine(layer->dev, 496 base = dma_alloc_writecombine(gdp->dev,
494 size, &dma_addr, GFP_KERNEL | GFP_DMA); 497 size, &dma_addr, GFP_KERNEL | GFP_DMA);
495 498
496 if (!base) { 499 if (!base) {
497 DRM_ERROR("Failed to allocate memory for GDP node\n"); 500 DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -526,7 +529,7 @@ static void sti_gdp_init(struct sti_layer *layer)
526 /* GDP of STiH407 chip has its own pixel clock */ 529
527 char *clk_name; 530 char *clk_name;
528 531
529 switch (layer->desc) { 532 switch (gdp->plane.desc) {
530 case STI_GDP_0: 533 case STI_GDP_0:
531 clk_name = "pix_gdp1"; 534 clk_name = "pix_gdp1";
532 break; 535 break;
@@ -544,30 +547,30 @@ static void sti_gdp_init(struct sti_layer *layer)
544 return; 547 return;
545 } 548 }
546 549
547 gdp->clk_pix = devm_clk_get(layer->dev, clk_name); 550 gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
548 if (IS_ERR(gdp->clk_pix)) 551 if (IS_ERR(gdp->clk_pix))
549 DRM_ERROR("Cannot get %s clock\n", clk_name); 552 DRM_ERROR("Cannot get %s clock\n", clk_name);
550 553
551 gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent"); 554 gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
552 if (IS_ERR(gdp->clk_main_parent)) 555 if (IS_ERR(gdp->clk_main_parent))
553 DRM_ERROR("Cannot get main_parent clock\n"); 556 DRM_ERROR("Cannot get main_parent clock\n");
554 557
555 gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent"); 558 gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
556 if (IS_ERR(gdp->clk_aux_parent)) 559 if (IS_ERR(gdp->clk_aux_parent))
557 DRM_ERROR("Cannot get aux_parent clock\n"); 560 DRM_ERROR("Cannot get aux_parent clock\n");
558 } 561 }
559} 562}
560 563
561static const struct sti_layer_funcs gdp_ops = { 564static const struct sti_plane_funcs gdp_plane_ops = {
562 .get_formats = sti_gdp_get_formats, 565 .get_formats = sti_gdp_get_formats,
563 .get_nb_formats = sti_gdp_get_nb_formats, 566 .get_nb_formats = sti_gdp_get_nb_formats,
564 .init = sti_gdp_init, 567 .prepare = sti_gdp_prepare,
565 .prepare = sti_gdp_prepare_layer, 568 .commit = sti_gdp_commit,
566 .commit = sti_gdp_commit_layer, 569 .disable = sti_gdp_disable,
567 .disable = sti_gdp_disable_layer,
568}; 570};
569 571
570struct sti_layer *sti_gdp_create(struct device *dev, int id) 572struct sti_plane *sti_gdp_create(struct device *dev, int desc,
573 void __iomem *baseaddr)
571{ 574{
572 struct sti_gdp *gdp; 575 struct sti_gdp *gdp;
573 576
@@ -577,8 +580,14 @@ struct sti_layer *sti_gdp_create(struct device *dev, int id)
577 return NULL; 580 return NULL;
578 } 581 }
579 582
580 gdp->layer.ops = &gdp_ops; 583 gdp->dev = dev;
584 gdp->regs = baseaddr;
585 gdp->plane.desc = desc;
586 gdp->plane.ops = &gdp_plane_ops;
587
581 gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb; 588 gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
582 589
583 return (struct sti_layer *)gdp; 590 sti_gdp_init(gdp);
591
592 return &gdp->plane;
584} 593}
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
index 1dab68274ad3..01818ea72125 100644
--- a/drivers/gpu/drm/sti/sti_gdp.h
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14struct sti_layer *sti_gdp_create(struct device *dev, int id); 14struct sti_plane *sti_gdp_create(struct device *dev, int desc,
15 void __iomem *baseaddr);
15 16
16#endif 17#endif
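The new sti_gdp_create() prototype allocates a struct sti_gdp but hands back only the embedded struct sti_plane; helpers such as sti_gdp_commit() later recover the wrapper with to_sti_gdp(), i.e. container_of() on the plane member. A minimal userspace sketch of that embedding pattern, with container_of() re-implemented and simplified stand-in structs rather than the real kernel layouts:

	/* Minimal sketch of the plane-embedded-in-gdp pattern used above. */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct sti_plane {              /* generic plane state (simplified) */
		int desc;
		const void *ops;
	};

	struct sti_gdp {                /* GDP-specific wrapper (simplified) */
		void *dev;
		void *regs;
		struct sti_plane plane; /* embedded, as in the patch */
	};

	#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)

	int main(void)
	{
		struct sti_gdp gdp = { .regs = (void *)0x1000 };
		struct sti_plane *plane = &gdp.plane;  /* what callers pass around */

		/* A plane callback such as sti_gdp_commit() recovers its wrapper: */
		struct sti_gdp *owner = to_sti_gdp(plane);
		printf("regs recovered from plane pointer: %p\n", owner->regs);
		return 0;
	}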
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 06595e902526..09e29e43423e 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -588,7 +588,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
588 return count; 588 return count;
589 589
590fail: 590fail:
591 DRM_ERROR("Can not read HDMI EDID\n"); 591 DRM_ERROR("Can't read HDMI EDID\n");
592 return 0; 592 return 0;
593} 593}
594 594
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index b0eb62de1b2e..54e8c2f06cf4 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -14,9 +14,7 @@
14#include <drm/drmP.h> 14#include <drm/drmP.h>
15 15
16#include "sti_drm_plane.h" 16#include "sti_drm_plane.h"
17#include "sti_hqvdp.h"
18#include "sti_hqvdp_lut.h" 17#include "sti_hqvdp_lut.h"
19#include "sti_layer.h"
20#include "sti_vtg.h" 18#include "sti_vtg.h"
21 19
22/* Firmware name */ 20/* Firmware name */
@@ -322,8 +320,7 @@ struct sti_hqvdp_cmd {
322 * @dev: driver device 320 * @dev: driver device
323 * @drm_dev: the drm device 321 * @drm_dev: the drm device
324 * @regs: registers 322 * @regs: registers
325 * @layer: layer structure for hqvdp itself 323 * @plane: plane structure for hqvdp itself
326 * @vid_plane: VID plug used as link with compositor IP
327 * @clk: IP clock 324 * @clk: IP clock
328 * @clk_pix_main: pix main clock 325 * @clk_pix_main: pix main clock
329 * @reset: reset control 326 * @reset: reset control
@@ -334,13 +331,13 @@ struct sti_hqvdp_cmd {
334 * @hqvdp_cmd: buffer of commands 331 * @hqvdp_cmd: buffer of commands
335 * @hqvdp_cmd_paddr: physical address of hqvdp_cmd 332 * @hqvdp_cmd_paddr: physical address of hqvdp_cmd
336 * @vtg: vtg for main data path 333 * @vtg: vtg for main data path
334 * @xp70_initialized: true if xp70 is already initialized
337 */ 335 */
338struct sti_hqvdp { 336struct sti_hqvdp {
339 struct device *dev; 337 struct device *dev;
340 struct drm_device *drm_dev; 338 struct drm_device *drm_dev;
341 void __iomem *regs; 339 void __iomem *regs;
342 struct sti_layer layer; 340 struct sti_plane plane;
343 struct drm_plane *vid_plane;
344 struct clk *clk; 341 struct clk *clk;
345 struct clk *clk_pix_main; 342 struct clk *clk_pix_main;
346 struct reset_control *reset; 343 struct reset_control *reset;
@@ -351,20 +348,21 @@ struct sti_hqvdp {
351 void *hqvdp_cmd; 348 void *hqvdp_cmd;
352 dma_addr_t hqvdp_cmd_paddr; 349 dma_addr_t hqvdp_cmd_paddr;
353 struct sti_vtg *vtg; 350 struct sti_vtg *vtg;
351 bool xp70_initialized;
354}; 352};
355 353
356#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer) 354#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
357 355
358static const uint32_t hqvdp_supported_formats[] = { 356static const uint32_t hqvdp_supported_formats[] = {
359 DRM_FORMAT_NV12, 357 DRM_FORMAT_NV12,
360}; 358};
361 359
362static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer) 360static const uint32_t *sti_hqvdp_get_formats(struct sti_plane *plane)
363{ 361{
364 return hqvdp_supported_formats; 362 return hqvdp_supported_formats;
365} 363}
366 364
367static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer) 365static unsigned int sti_hqvdp_get_nb_formats(struct sti_plane *plane)
368{ 366{
369 return ARRAY_SIZE(hqvdp_supported_formats); 367 return ARRAY_SIZE(hqvdp_supported_formats);
370} 368}
@@ -484,7 +482,7 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
484 482
485/** 483/**
486 * sti_hqvdp_check_hw_scaling 484 * sti_hqvdp_check_hw_scaling
487 * @layer: hqvdp layer 485 * @plane: hqvdp plane
488 * 486 *
489 * Check if the HW is able to perform the scaling request 487 * Check if the HW is able to perform the scaling request
490 * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where: 488 * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
@@ -498,23 +496,23 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
498 * RETURNS: 496 * RETURNS:
499 * True if the HW can scale. 497 * True if the HW can scale.
500 */ 498 */
501static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer) 499static bool sti_hqvdp_check_hw_scaling(struct sti_plane *plane)
502{ 500{
503 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); 501 struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
504 unsigned long lfw; 502 unsigned long lfw;
505 unsigned int inv_zy; 503 unsigned int inv_zy;
506 504
507 lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000); 505 lfw = plane->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
508 lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000; 506 lfw /= max(plane->src_w, plane->dst_w) * plane->mode->clock / 1000;
509 507
510 inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h); 508 inv_zy = DIV_ROUND_UP(plane->src_h, plane->dst_h);
511 509
512 return (inv_zy <= lfw) ? true : false; 510 return (inv_zy <= lfw) ? true : false;
513} 511}
514 512
515/** 513/**
516 * sti_hqvdp_prepare_layer 514 * sti_hqvdp_prepare
517 * @layer: hqvdp layer 515 * @plane: hqvdp plane
518 * @first_prepare: true if it is the first time this function is called 516 * @first_prepare: true if it is the first time this function is called
519 * 517 *
520 * Prepares a command for the firmware 518 * Prepares a command for the firmware
@@ -522,22 +520,14 @@ static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
522 * RETURNS: 520 * RETURNS:
523 * 0 on success. 521 * 0 on success.
524 */ 522 */
525static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare) 523static int sti_hqvdp_prepare(struct sti_plane *plane, bool first_prepare)
526{ 524{
527 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); 525 struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
528 struct sti_hqvdp_cmd *cmd; 526 struct sti_hqvdp_cmd *cmd;
529 int scale_h, scale_v; 527 int scale_h, scale_v;
530 int cmd_offset; 528 int cmd_offset;
531 529
532 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); 530 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_plane_to_str(plane));
533
534 /* prepare and commit VID plane */
535 hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
536 layer->crtc, layer->fb,
537 layer->dst_x, layer->dst_y,
538 layer->dst_w, layer->dst_h,
539 layer->src_x, layer->src_y,
540 layer->src_w, layer->src_h);
541 531
542 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); 532 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
543 if (cmd_offset == -1) { 533 if (cmd_offset == -1) {
@@ -546,7 +536,7 @@ static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
546 } 536 }
547 cmd = hqvdp->hqvdp_cmd + cmd_offset; 537 cmd = hqvdp->hqvdp_cmd + cmd_offset;
548 538
549 if (!sti_hqvdp_check_hw_scaling(layer)) { 539 if (!sti_hqvdp_check_hw_scaling(plane)) {
550 DRM_ERROR("Scaling beyond HW capabilities\n"); 540 DRM_ERROR("Scaling beyond HW capabilities\n");
551 return -EINVAL; 541 return -EINVAL;
552 } 542 }
@@ -565,42 +555,42 @@ static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
565 cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT; 555 cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
566 556
567 /* Buffer planes address */ 557 /* Buffer planes address */
568 cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0]; 558 cmd->top.current_luma = (u32)plane->paddr + plane->offsets[0];
569 cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1]; 559 cmd->top.current_chroma = (u32)plane->paddr + plane->offsets[1];
570 560
571 /* Pitches */ 561 /* Pitches */
572 cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch = 562 cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
573 layer->pitches[0]; 563 plane->pitches[0];
574 cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch = 564 cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
575 layer->pitches[1]; 565 plane->pitches[1];
576 566
577 /* Input / output size 567 /* Input / output size
578 * Align to upper even value */ 568 * Align to upper even value */
579 layer->dst_w = ALIGN(layer->dst_w, 2); 569 plane->dst_w = ALIGN(plane->dst_w, 2);
580 layer->dst_h = ALIGN(layer->dst_h, 2); 570 plane->dst_h = ALIGN(plane->dst_h, 2);
581 571
582 if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) || 572 if ((plane->src_w > MAX_WIDTH) || (plane->src_w < MIN_WIDTH) ||
583 (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) || 573 (plane->src_h > MAX_HEIGHT) || (plane->src_h < MIN_HEIGHT) ||
584 (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) || 574 (plane->dst_w > MAX_WIDTH) || (plane->dst_w < MIN_WIDTH) ||
585 (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) { 575 (plane->dst_h > MAX_HEIGHT) || (plane->dst_h < MIN_HEIGHT)) {
586 DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n", 576 DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
587 layer->src_w, layer->src_h, 577 plane->src_w, plane->src_h,
588 layer->dst_w, layer->dst_h); 578 plane->dst_w, plane->dst_h);
589 return -EINVAL; 579 return -EINVAL;
590 } 580 }
591 cmd->top.input_viewport_size = cmd->top.input_frame_size = 581 cmd->top.input_viewport_size = cmd->top.input_frame_size =
592 layer->src_h << 16 | layer->src_w; 582 plane->src_h << 16 | plane->src_w;
593 cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w; 583 cmd->hvsrc.output_picture_size = plane->dst_h << 16 | plane->dst_w;
594 cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x; 584 cmd->top.input_viewport_ori = plane->src_y << 16 | plane->src_x;
595 585
596 /* Handle interlaced */ 586 /* Handle interlaced */
597 if (layer->fb->flags & DRM_MODE_FB_INTERLACED) { 587 if (plane->fb->flags & DRM_MODE_FB_INTERLACED) {
598 /* Top field to display */ 588 /* Top field to display */
599 cmd->top.config = TOP_CONFIG_INTER_TOP; 589 cmd->top.config = TOP_CONFIG_INTER_TOP;
600 590
601 /* Update pitches and vert size */ 591 /* Update pitches and vert size */
602 cmd->top.input_frame_size = (layer->src_h / 2) << 16 | 592 cmd->top.input_frame_size = (plane->src_h / 2) << 16 |
603 layer->src_w; 593 plane->src_w;
604 cmd->top.luma_processed_pitch *= 2; 594 cmd->top.luma_processed_pitch *= 2;
605 cmd->top.luma_src_pitch *= 2; 595 cmd->top.luma_src_pitch *= 2;
606 cmd->top.chroma_processed_pitch *= 2; 596 cmd->top.chroma_processed_pitch *= 2;
@@ -613,10 +603,10 @@ static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
613 } 603 }
614 604
615 /* Update hvsrc lut coef */ 605 /* Update hvsrc lut coef */
616 scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w; 606 scale_h = SCALE_FACTOR * plane->dst_w / plane->src_w;
617 sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc); 607 sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
618 608
619 scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h; 609 scale_v = SCALE_FACTOR * plane->dst_h / plane->src_h;
620 sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc); 610 sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
621 611
622 if (first_prepare) { 612 if (first_prepare) {
@@ -627,9 +617,9 @@ static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
627 } 617 }
628 618
629 /* Register VTG Vsync callback to handle bottom fields */ 619 /* Register VTG Vsync callback to handle bottom fields */
630 if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) && 620 if ((plane->fb->flags & DRM_MODE_FB_INTERLACED) &&
631 sti_vtg_register_client(hqvdp->vtg, 621 sti_vtg_register_client(hqvdp->vtg, &hqvdp->vtg_nb,
632 &hqvdp->vtg_nb, layer->mixer_id)) { 622 plane->mixer_id)) {
633 DRM_ERROR("Cannot register VTG notifier\n"); 623 DRM_ERROR("Cannot register VTG notifier\n");
634 return -ENXIO; 624 return -ENXIO;
635 } 625 }
@@ -638,12 +628,21 @@ static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
638 return 0; 628 return 0;
639} 629}
640 630
641static int sti_hqvdp_commit_layer(struct sti_layer *layer) 631/**
632 * sti_hqvdp_commit
633 * @plane: hqvdp plane
634 *
635 * Enables the HQVDP plane
636 *
637 * RETURNS:
638 * 0 on success.
639 */
640static int sti_hqvdp_commit(struct sti_plane *plane)
642{ 641{
643 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); 642 struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
644 int cmd_offset; 643 int cmd_offset;
645 644
646 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); 645 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_plane_to_str(plane));
647 646
648 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); 647 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
649 if (cmd_offset == -1) { 648 if (cmd_offset == -1) {
@@ -657,7 +656,7 @@ static int sti_hqvdp_commit_layer(struct sti_layer *layer)
657 hqvdp->curr_field_count++; 656 hqvdp->curr_field_count++;
658 657
659 /* Interlaced : get ready to display the bottom field at next Vsync */ 658 /* Interlaced : get ready to display the bottom field at next Vsync */
660 if (layer->fb->flags & DRM_MODE_FB_INTERLACED) 659 if (plane->fb->flags & DRM_MODE_FB_INTERLACED)
661 hqvdp->btm_field_pending = true; 660 hqvdp->btm_field_pending = true;
662 661
663 dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n", 662 dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
@@ -666,16 +665,25 @@ static int sti_hqvdp_commit_layer(struct sti_layer *layer)
666 return 0; 665 return 0;
667} 666}
668 667
669static int sti_hqvdp_disable_layer(struct sti_layer *layer) 668/**
669 * sti_hqvdp_disable
670 * @plane: hqvdp plane
671 *
672 * Disables the HQVDP plane
673 *
674 * RETURNS:
675 * 0 on success.
676 */
677static int sti_hqvdp_disable(struct sti_plane *plane)
670{ 678{
671 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer); 679 struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
672 int i; 680 int i;
673 681
674 DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); 682 DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(plane));
675 683
676 /* Unregister VTG Vsync callback */ 684 /* Unregister VTG Vsync callback */
677 if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) && 685 if ((plane->fb->flags & DRM_MODE_FB_INTERLACED) &&
678 sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb)) 686 sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
679 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 687 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
680 688
681 /* Set next cmd to NULL */ 689 /* Set next cmd to NULL */
@@ -696,9 +704,6 @@ static int sti_hqvdp_disable_layer(struct sti_layer *layer)
696 return -ENXIO; 704 return -ENXIO;
697 } 705 }
698 706
699 /* disable VID plane */
700 hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
701
702 return 0; 707 return 0;
703} 708}
704 709
@@ -758,32 +763,10 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
758 return 0; 763 return 0;
759} 764}
760 765
761static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id) 766static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
762{
763 struct drm_plane *plane;
764
765 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
766 struct sti_layer *layer = to_sti_layer(plane);
767
768 if (layer->desc == id)
769 return plane;
770 }
771
772 return NULL;
773}
774
775static void sti_hqvd_init(struct sti_layer *layer)
776{ 767{
777 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
778 int size; 768 int size;
779 769
780 /* find the plane macthing with vid 0 */
781 hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
782 if (!hqvdp->vid_plane) {
783 DRM_ERROR("Cannot find Main video layer\n");
784 return;
785 }
786
787 hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb; 770 hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
788 771
789 /* Allocate memory for the VDP commands */ 772 /* Allocate memory for the VDP commands */
@@ -799,24 +782,25 @@ static void sti_hqvd_init(struct sti_layer *layer)
799 memset(hqvdp->hqvdp_cmd, 0, size); 782 memset(hqvdp->hqvdp_cmd, 0, size);
800} 783}
801 784
802static const struct sti_layer_funcs hqvdp_ops = { 785static const struct sti_plane_funcs hqvdp_plane_ops = {
803 .get_formats = sti_hqvdp_get_formats, 786 .get_formats = sti_hqvdp_get_formats,
804 .get_nb_formats = sti_hqvdp_get_nb_formats, 787 .get_nb_formats = sti_hqvdp_get_nb_formats,
805 .init = sti_hqvd_init, 788 .prepare = sti_hqvdp_prepare,
806 .prepare = sti_hqvdp_prepare_layer, 789 .commit = sti_hqvdp_commit,
807 .commit = sti_hqvdp_commit_layer, 790 .disable = sti_hqvdp_disable,
808 .disable = sti_hqvdp_disable_layer,
809}; 791};
810 792
811struct sti_layer *sti_hqvdp_create(struct device *dev) 793struct sti_plane *sti_hqvdp_create(struct device *dev, int desc)
812{ 794{
813 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); 795 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
814 796
815 hqvdp->layer.ops = &hqvdp_ops; 797 hqvdp->plane.desc = desc;
798 hqvdp->plane.ops = &hqvdp_plane_ops;
816 799
817 return &hqvdp->layer; 800 sti_hqvdp_init(hqvdp);
801
802 return &hqvdp->plane;
818} 803}
819EXPORT_SYMBOL(sti_hqvdp_create);
820 804
821static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp) 805static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
822{ 806{
@@ -859,6 +843,12 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
859 } *header; 843 } *header;
860 844
861 DRM_DEBUG_DRIVER("\n"); 845 DRM_DEBUG_DRIVER("\n");
846
847 if (hqvdp->xp70_initialized) {
848 DRM_INFO("HQVDP XP70 already initialized\n");
849 return;
850 }
851
862 /* Check firmware parts */ 852 /* Check firmware parts */
863 if (!firmware) { 853 if (!firmware) {
864 DRM_ERROR("Firmware not available\n"); 854 DRM_ERROR("Firmware not available\n");
@@ -946,7 +936,10 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
946 /* Launch Vsync */ 936 /* Launch Vsync */
947 writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC); 937 writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
948 938
949 DRM_INFO("HQVDP XP70 started\n"); 939 DRM_INFO("HQVDP XP70 initialized\n");
940
941 hqvdp->xp70_initialized = true;
942
950out: 943out:
951 release_firmware(firmware); 944 release_firmware(firmware);
952} 945}
@@ -955,7 +948,7 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
955{ 948{
956 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); 949 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
957 struct drm_device *drm_dev = data; 950 struct drm_device *drm_dev = data;
958 struct sti_layer *layer; 951 struct sti_plane *plane;
959 int err; 952 int err;
960 953
961 DRM_DEBUG_DRIVER("\n"); 954 DRM_DEBUG_DRIVER("\n");
@@ -971,13 +964,13 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
971 return err; 964 return err;
972 } 965 }
973 966
974 layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs); 967 /* Create HQVDP plane once xp70 is initialized */
975 if (!layer) { 968 plane = sti_hqvdp_create(hqvdp->dev, STI_HQVDP_0);
969 if (plane)
970 sti_drm_plane_init(hqvdp->drm_dev, plane, 1,
971 DRM_PLANE_TYPE_OVERLAY);
972 else
976 DRM_ERROR("Can't create HQVDP plane\n"); 973 DRM_ERROR("Can't create HQVDP plane\n");
977 return -ENOMEM;
978 }
979
980 sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
981 974
982 return 0; 975 return 0;
983} 976}
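The firmware limit checked in sti_hqvdp_check_hw_scaling() above, CEIL(1/Zy) <= FLOOR(LFW), boils down to integer arithmetic on the mode timings: lfw is the processing budget derived from the IP clock, the mode's htotal and pixel clock and the widest of the source/destination widths, and inv_zy is the rounded-up inverse of the vertical zoom. A standalone sketch of that computation; the clock rate and timings in main() are illustrative numbers, not real hardware values:

	/* Standalone sketch of the CEIL(1/Zy) <= FLOOR(LFW) check from
	 * sti_hqvdp_check_hw_scaling(); the example numbers are illustrative. */
	#include <stdbool.h>
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
	#define max(a, b) ((a) > (b) ? (a) : (b))

	static bool hqvdp_can_scale(unsigned long ip_clk_hz, int htotal,
				    int mode_clock_khz,
				    int src_w, int src_h, int dst_w, int dst_h)
	{
		unsigned long lfw;
		unsigned int inv_zy;

		/* Same integer arithmetic as the driver code above. */
		lfw = htotal * (ip_clk_hz / 1000000);
		lfw /= max(src_w, dst_w) * mode_clock_khz / 1000;

		/* CEIL(1/Zy) with Zy = dst_h / src_h */
		inv_zy = DIV_ROUND_UP(src_h, dst_h);

		return inv_zy <= lfw;
	}

	int main(void)
	{
		/* Illustrative: 400 MHz IP clock, 1080p-like timing, 2x vertical
		 * downscale (1080 -> 540 lines). */
		bool ok = hqvdp_can_scale(400000000UL, 2200, 148500,
					  1920, 1080, 1920, 540);
		printf("scaling %s\n",
		       ok ? "within HW capabilities" : "beyond HW capabilities");
		return 0;
	}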
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
deleted file mode 100644
index cd5ecd0a6dea..000000000000
--- a/drivers/gpu/drm/sti/sti_hqvdp.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_HQVDP_H_
8#define _STI_HQVDP_H_
9
10struct sti_layer *sti_hqvdp_create(struct device *dev);
11
12#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
deleted file mode 100644
index 899104f9d4bc..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ /dev/null
@@ -1,213 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <drm/drmP.h>
10#include <drm/drm_gem_cma_helper.h>
11#include <drm/drm_fb_cma_helper.h>
12
13#include "sti_compositor.h"
14#include "sti_cursor.h"
15#include "sti_gdp.h"
16#include "sti_hqvdp.h"
17#include "sti_layer.h"
18#include "sti_vid.h"
19
20const char *sti_layer_to_str(struct sti_layer *layer)
21{
22 switch (layer->desc) {
23 case STI_GDP_0:
24 return "GDP0";
25 case STI_GDP_1:
26 return "GDP1";
27 case STI_GDP_2:
28 return "GDP2";
29 case STI_GDP_3:
30 return "GDP3";
31 case STI_VID_0:
32 return "VID0";
33 case STI_VID_1:
34 return "VID1";
35 case STI_CURSOR:
36 return "CURSOR";
37 case STI_HQVDP_0:
38 return "HQVDP0";
39 default:
40 return "<UNKNOWN LAYER>";
41 }
42}
43EXPORT_SYMBOL(sti_layer_to_str);
44
45struct sti_layer *sti_layer_create(struct device *dev, int desc,
46 void __iomem *baseaddr)
47{
48
49 struct sti_layer *layer = NULL;
50
51 switch (desc & STI_LAYER_TYPE_MASK) {
52 case STI_GDP:
53 layer = sti_gdp_create(dev, desc);
54 break;
55 case STI_VID:
56 layer = sti_vid_create(dev);
57 break;
58 case STI_CUR:
59 layer = sti_cursor_create(dev);
60 break;
61 case STI_VDP:
62 layer = sti_hqvdp_create(dev);
63 break;
64 }
65
66 if (!layer) {
67 DRM_ERROR("Failed to create layer\n");
68 return NULL;
69 }
70
71 layer->desc = desc;
72 layer->dev = dev;
73 layer->regs = baseaddr;
74
75 layer->ops->init(layer);
76
77 DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
78
79 return layer;
80}
81EXPORT_SYMBOL(sti_layer_create);
82
83int sti_layer_prepare(struct sti_layer *layer,
84 struct drm_crtc *crtc,
85 struct drm_framebuffer *fb,
86 struct drm_display_mode *mode, int mixer_id,
87 int dest_x, int dest_y, int dest_w, int dest_h,
88 int src_x, int src_y, int src_w, int src_h)
89{
90 int ret;
91 unsigned int i;
92 struct drm_gem_cma_object *cma_obj;
93
94 if (!layer || !fb || !mode) {
95 DRM_ERROR("Null fb, layer or mode\n");
96 return 1;
97 }
98
99 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
100 if (!cma_obj) {
101 DRM_ERROR("Can't get CMA GEM object for fb\n");
102 return 1;
103 }
104
105 layer->crtc = crtc;
106 layer->fb = fb;
107 layer->mode = mode;
108 layer->mixer_id = mixer_id;
109 layer->dst_x = dest_x;
110 layer->dst_y = dest_y;
111 layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
112 layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
113 layer->src_x = src_x;
114 layer->src_y = src_y;
115 layer->src_w = src_w;
116 layer->src_h = src_h;
117 layer->format = fb->pixel_format;
118 layer->vaddr = cma_obj->vaddr;
119 layer->paddr = cma_obj->paddr;
120 for (i = 0; i < 4; i++) {
121 layer->pitches[i] = fb->pitches[i];
122 layer->offsets[i] = fb->offsets[i];
123 }
124
125 DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
126 sti_layer_to_str(layer),
127 layer->mixer_id);
128 DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
129 sti_layer_to_str(layer),
130 layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
131 layer->src_w, layer->src_h, layer->src_x,
132 layer->src_y);
133
134 DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
135 (char *)&layer->format, (unsigned long)layer->paddr);
136
137 if (!layer->ops->prepare)
138 goto err_no_prepare;
139
140 ret = layer->ops->prepare(layer, !layer->enabled);
141 if (!ret)
142 layer->enabled = true;
143
144 return ret;
145
146err_no_prepare:
147 DRM_ERROR("Cannot prepare\n");
148 return 1;
149}
150
151int sti_layer_commit(struct sti_layer *layer)
152{
153 if (!layer)
154 return 1;
155
156 if (!layer->ops->commit)
157 goto err_no_commit;
158
159 return layer->ops->commit(layer);
160
161err_no_commit:
162 DRM_ERROR("Cannot commit\n");
163 return 1;
164}
165
166int sti_layer_disable(struct sti_layer *layer)
167{
168 int ret;
169
170 DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
171 if (!layer)
172 return 1;
173
174 if (!layer->enabled)
175 return 0;
176
177 if (!layer->ops->disable)
178 goto err_no_disable;
179
180 ret = layer->ops->disable(layer);
181 if (!ret)
182 layer->enabled = false;
183 else
184 DRM_ERROR("Disable failed\n");
185
186 return ret;
187
188err_no_disable:
189 DRM_ERROR("Cannot disable\n");
190 return 1;
191}
192
193const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
194{
195 if (!layer)
196 return NULL;
197
198 if (!layer->ops->get_formats)
199 return NULL;
200
201 return layer->ops->get_formats(layer);
202}
203
204unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
205{
206 if (!layer)
207 return 0;
208
209 if (!layer->ops->get_nb_formats)
210 return 0;
211
212 return layer->ops->get_nb_formats(layer);
213}
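Among the bookkeeping the removed sti_layer_prepare() performed was clamping the destination rectangle to the active display area (dst_w = clamp_val(dest_w, 0, crtc_hdisplay - dest_x), and likewise for the height) before dispatching to the per-layer prepare hook; that responsibility now moves with the plane code. A small sketch of the clamping, with clamp_val() re-implemented to mimic the kernel macro:

	/* Sketch of the destination clamping done by the removed
	 * sti_layer_prepare(); clamp_val() mimics the kernel macro. */
	#include <stdio.h>

	static int clamp_val(int val, int lo, int hi)
	{
		if (val < lo)
			return lo;
		if (val > hi)
			return hi;
		return val;
	}

	int main(void)
	{
		int crtc_hdisplay = 1920, crtc_vdisplay = 1080;
		int dest_x = 1800, dest_y = 1000, dest_w = 400, dest_h = 200;

		/* The overlay must not extend past the active display area. */
		int dst_w = clamp_val(dest_w, 0, crtc_hdisplay - dest_x);
		int dst_h = clamp_val(dest_h, 0, crtc_vdisplay - dest_y);

		printf("clamped destination: %dx%d@(%d,%d)\n",
		       dst_w, dst_h, dest_x, dest_y);
		return 0;
	}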
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
deleted file mode 100644
index ceff497f557e..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#ifndef _STI_LAYER_H_
10#define _STI_LAYER_H_
11
12#include <drm/drmP.h>
13
14#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
15
16#define STI_LAYER_TYPE_SHIFT 8
17#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
18
19struct sti_layer;
20
21enum sti_layer_type {
22 STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
23 STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
24 STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
25 STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
26 STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
27};
28
29enum sti_layer_id_of_type {
30 STI_ID_0 = 0,
31 STI_ID_1 = 1,
32 STI_ID_2 = 2,
33 STI_ID_3 = 3
34};
35
36enum sti_layer_desc {
37 STI_GDP_0 = STI_GDP | STI_ID_0,
38 STI_GDP_1 = STI_GDP | STI_ID_1,
39 STI_GDP_2 = STI_GDP | STI_ID_2,
40 STI_GDP_3 = STI_GDP | STI_ID_3,
41 STI_VID_0 = STI_VID | STI_ID_0,
42 STI_VID_1 = STI_VID | STI_ID_1,
43 STI_HQVDP_0 = STI_VDP | STI_ID_0,
44 STI_CURSOR = STI_CUR,
45 STI_BACK = STI_BCK
46};
47
48/**
49 * STI layer functions structure
50 *
51 * @get_formats: get layer supported formats
52 * @get_nb_formats: get number of format supported
53 * @init: initialize the layer
54 * @prepare: prepare layer before rendering
55 * @commit: set layer for rendering
56 * @disable: disable layer
57 */
58struct sti_layer_funcs {
59 const uint32_t* (*get_formats)(struct sti_layer *layer);
60 unsigned int (*get_nb_formats)(struct sti_layer *layer);
61 void (*init)(struct sti_layer *layer);
62 int (*prepare)(struct sti_layer *layer, bool first_prepare);
63 int (*commit)(struct sti_layer *layer);
64 int (*disable)(struct sti_layer *layer);
65};
66
67/**
68 * STI layer structure
69 *
70 * @plane: drm plane it is bound to (if any)
71 * @fb: drm fb it is bound to
72 * @crtc: crtc it is bound to
73 * @mode: display mode
74 * @desc: layer type & id
75 * @device: driver device
76 * @regs: layer registers
77 * @ops: layer functions
78 * @zorder: layer z-order
79 * @mixer_id: id of the mixer used to display the layer
80 * @enabled: to know if the layer is active or not
81 * @src_x src_y: coordinates of the input (fb) area
82 * @src_w src_h: size of the input (fb) area
83 * @dst_x dst_y: coordinates of the output (crtc) area
84 * @dst_w dst_h: size of the output (crtc) area
85 * @format: format
86 * @pitches: pitch of 'planes' (eg: Y, U, V)
87 * @offsets: offset of 'planes'
88 * @vaddr: virtual address of the input buffer
89 * @paddr: physical address of the input buffer
90 */
91struct sti_layer {
92 struct drm_plane plane;
93 struct drm_framebuffer *fb;
94 struct drm_crtc *crtc;
95 struct drm_display_mode *mode;
96 enum sti_layer_desc desc;
97 struct device *dev;
98 void __iomem *regs;
99 const struct sti_layer_funcs *ops;
100 int zorder;
101 int mixer_id;
102 bool enabled;
103 int src_x, src_y;
104 int src_w, src_h;
105 int dst_x, dst_y;
106 int dst_w, dst_h;
107 uint32_t format;
108 unsigned int pitches[4];
109 unsigned int offsets[4];
110 void *vaddr;
111 dma_addr_t paddr;
112};
113
114struct sti_layer *sti_layer_create(struct device *dev, int desc,
115 void __iomem *baseaddr);
116int sti_layer_prepare(struct sti_layer *layer,
117 struct drm_crtc *crtc,
118 struct drm_framebuffer *fb,
119 struct drm_display_mode *mode,
120 int mixer_id,
121 int dest_x, int dest_y,
122 int dest_w, int dest_h,
123 int src_x, int src_y,
124 int src_w, int src_h);
125int sti_layer_commit(struct sti_layer *layer);
126int sti_layer_disable(struct sti_layer *layer);
127const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
128unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
129const char *sti_layer_to_str(struct sti_layer *layer);
130
131#endif
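The removed header also shows how the subdev descriptors are encoded: the type sits above STI_LAYER_TYPE_SHIFT (8) and the instance id in the low bits, so STI_GDP_1 is (1 << 8) | 1 and STI_HQVDP_0 is (5 << 8) | 0, and masking with STI_LAYER_TYPE_MASK recovers the type (which is how the old sti_layer_create() dispatched). A quick sketch of that encoding:

	/* Sketch of the type/id descriptor encoding from the removed sti_layer.h. */
	#include <stdio.h>

	#define STI_LAYER_TYPE_SHIFT 8
	#define STI_LAYER_TYPE_MASK  (~((1 << STI_LAYER_TYPE_SHIFT) - 1))

	enum { STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
	       STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
	       STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
	       STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
	       STI_VDP = 5 << STI_LAYER_TYPE_SHIFT };

	int main(void)
	{
		int gdp1   = STI_GDP | 1;   /* STI_GDP_1   */
		int hqvdp0 = STI_VDP | 0;   /* STI_HQVDP_0 */

		printf("GDP_1:   desc=0x%03x type=0x%03x id=%d\n",
		       gdp1, gdp1 & STI_LAYER_TYPE_MASK, gdp1 & ~STI_LAYER_TYPE_MASK);
		printf("HQVDP_0: desc=0x%03x type=0x%03x id=%d\n",
		       hqvdp0, hqvdp0 & STI_LAYER_TYPE_MASK, hqvdp0 & ~STI_LAYER_TYPE_MASK);
		return 0;
	}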
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 61a2048cf5d6..d5a96561c8ce 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -101,61 +101,57 @@ static void sti_mixer_set_background_area(struct sti_mixer *mixer,
101 sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds); 101 sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
102} 102}
103 103
104int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer) 104int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
105{ 105{
106 int layer_id, depth = layer->zorder; 106 int plane_id, depth = plane->zorder;
107 unsigned int i; 107 unsigned int i;
108 u32 mask, val; 108 u32 mask, val;
109 109
110 if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL)) 110 if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL))
111 return 1; 111 return 1;
112 112
113 switch (layer->desc) { 113 switch (plane->desc) {
114 case STI_GDP_0: 114 case STI_GDP_0:
115 layer_id = GAM_DEPTH_GDP0_ID; 115 plane_id = GAM_DEPTH_GDP0_ID;
116 break; 116 break;
117 case STI_GDP_1: 117 case STI_GDP_1:
118 layer_id = GAM_DEPTH_GDP1_ID; 118 plane_id = GAM_DEPTH_GDP1_ID;
119 break; 119 break;
120 case STI_GDP_2: 120 case STI_GDP_2:
121 layer_id = GAM_DEPTH_GDP2_ID; 121 plane_id = GAM_DEPTH_GDP2_ID;
122 break; 122 break;
123 case STI_GDP_3: 123 case STI_GDP_3:
124 layer_id = GAM_DEPTH_GDP3_ID; 124 plane_id = GAM_DEPTH_GDP3_ID;
125 break; 125 break;
126 case STI_VID_0:
127 case STI_HQVDP_0: 126 case STI_HQVDP_0:
128 layer_id = GAM_DEPTH_VID0_ID; 127 plane_id = GAM_DEPTH_VID0_ID;
129 break;
130 case STI_VID_1:
131 layer_id = GAM_DEPTH_VID1_ID;
132 break; 128 break;
133 case STI_CURSOR: 129 case STI_CURSOR:
134 /* no need to set depth for cursor */ 130 /* no need to set depth for cursor */
135 return 0; 131 return 0;
136 default: 132 default:
137 DRM_ERROR("Unknown layer %d\n", layer->desc); 133 DRM_ERROR("Unknown plane %d\n", plane->desc);
138 return 1; 134 return 1;
139 } 135 }
140 136
141 /* Search if a previous depth was already assigned to the layer */ 137 /* Search if a previous depth was already assigned to the plane */
142 val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB); 138 val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
143 for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) { 139 for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
144 mask = GAM_DEPTH_MASK_ID << (3 * i); 140 mask = GAM_DEPTH_MASK_ID << (3 * i);
145 if ((val & mask) == layer_id << (3 * i)) 141 if ((val & mask) == plane_id << (3 * i))
146 break; 142 break;
147 } 143 }
148 144
149 mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1)); 145 mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1));
150 layer_id = layer_id << (3 * (depth - 1)); 146 plane_id = plane_id << (3 * (depth - 1));
151 147
152 DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer), 148 DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
153 sti_layer_to_str(layer), depth); 149 sti_plane_to_str(plane), depth);
154 dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n", 150 dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
155 layer_id, mask); 151 plane_id, mask);
156 152
157 val &= ~mask; 153 val &= ~mask;
158 val |= layer_id; 154 val |= plane_id;
159 sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val); 155 sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
160 156
161 dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n", 157 dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
@@ -185,9 +181,9 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
185 return 0; 181 return 0;
186} 182}
187 183
188static u32 sti_mixer_get_layer_mask(struct sti_layer *layer) 184static u32 sti_mixer_get_plane_mask(struct sti_plane *plane)
189{ 185{
190 switch (layer->desc) { 186 switch (plane->desc) {
191 case STI_BACK: 187 case STI_BACK:
192 return GAM_CTL_BACK_MASK; 188 return GAM_CTL_BACK_MASK;
193 case STI_GDP_0: 189 case STI_GDP_0:
@@ -198,11 +194,8 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
198 return GAM_CTL_GDP2_MASK; 194 return GAM_CTL_GDP2_MASK;
199 case STI_GDP_3: 195 case STI_GDP_3:
200 return GAM_CTL_GDP3_MASK; 196 return GAM_CTL_GDP3_MASK;
201 case STI_VID_0:
202 case STI_HQVDP_0: 197 case STI_HQVDP_0:
203 return GAM_CTL_VID0_MASK; 198 return GAM_CTL_VID0_MASK;
204 case STI_VID_1:
205 return GAM_CTL_VID1_MASK;
206 case STI_CURSOR: 199 case STI_CURSOR:
207 return GAM_CTL_CURSOR_MASK; 200 return GAM_CTL_CURSOR_MASK;
208 default: 201 default:
@@ -210,17 +203,17 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
210 } 203 }
211} 204}
212 205
213int sti_mixer_set_layer_status(struct sti_mixer *mixer, 206int sti_mixer_set_plane_status(struct sti_mixer *mixer,
214 struct sti_layer *layer, bool status) 207 struct sti_plane *plane, bool status)
215{ 208{
216 u32 mask, val; 209 u32 mask, val;
217 210
218 DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable", 211 DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
219 sti_mixer_to_str(mixer), sti_layer_to_str(layer)); 212 sti_mixer_to_str(mixer), sti_plane_to_str(plane));
220 213
221 mask = sti_mixer_get_layer_mask(layer); 214 mask = sti_mixer_get_plane_mask(plane);
222 if (!mask) { 215 if (!mask) {
223 DRM_ERROR("Can not find layer mask\n"); 216 DRM_ERROR("Can't find layer mask\n");
224 return -EINVAL; 217 return -EINVAL;
225 } 218 }
226 219
@@ -232,11 +225,11 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer,
232 return 0; 225 return 0;
233} 226}
234 227
235void sti_mixer_clear_all_layers(struct sti_mixer *mixer) 228void sti_mixer_clear_all_planes(struct sti_mixer *mixer)
236{ 229{
237 u32 val; 230 u32 val;
238 231
239 DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer)); 232 DRM_DEBUG_DRIVER("%s clear all planes\n", sti_mixer_to_str(mixer));
240 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000; 233 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
241 sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val); 234 sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
242} 235}
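sti_mixer_set_plane_depth() above packs one 3-bit plane id per depth level into GAM_MIXER_CRB: it scans the register for a slot already holding this plane's id (so a plane never occupies two depths), widens the mask to also cover the requested slot, clears both and writes the id at bit position 3 * (depth - 1). A standalone sketch of that bit manipulation; the id and level constants below are placeholders, not the hardware-defined values:

	/* Standalone sketch of the GAM_MIXER_CRB depth programming done by
	 * sti_mixer_set_plane_depth(); register ids are placeholders. */
	#include <stdio.h>

	#define NB_DEPTH_LEVEL 7
	#define DEPTH_MASK_ID  7   /* one 3-bit slot per depth level */
	#define DEPTH_GDP0_ID  1   /* placeholder plane id */

	static unsigned int set_plane_depth(unsigned int crb,
					    unsigned int plane_id, int depth)
	{
		unsigned int mask = 0, val = crb;
		int i;

		/* Find a depth slot already holding this plane id, if any. */
		for (i = 0; i < NB_DEPTH_LEVEL; i++) {
			mask = DEPTH_MASK_ID << (3 * i);
			if ((val & mask) == plane_id << (3 * i))
				break;
		}

		/* Clear that slot and the requested one, then write the id
		 * at the new depth position. */
		mask |= DEPTH_MASK_ID << (3 * (depth - 1));
		val &= ~mask;
		val |= plane_id << (3 * (depth - 1));
		return val;
	}

	int main(void)
	{
		unsigned int crb = 0;

		crb = set_plane_depth(crb, DEPTH_GDP0_ID, 3); /* GDP0 at depth 3 */
		printf("CRB after depth 3: 0x%08x\n", crb);   /* id in bits [8:6] */

		crb = set_plane_depth(crb, DEPTH_GDP0_ID, 1); /* move GDP0 to depth 1 */
		printf("CRB after move:    0x%08x\n", crb);   /* old slot cleared */
		return 0;
	}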
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index eb663f65f814..9d51eac26e90 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -11,7 +11,7 @@
11 11
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13 13
14#include "sti_layer.h" 14#include "sti_drm_plane.h"
15 15
16#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc) 16#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
17 17
@@ -29,7 +29,7 @@ struct sti_mixer {
29 struct device *dev; 29 struct device *dev;
30 void __iomem *regs; 30 void __iomem *regs;
31 int id; 31 int id;
32 struct drm_crtc drm_crtc; 32 struct drm_crtc drm_crtc;
33 struct drm_pending_vblank_event *pending_event; 33 struct drm_pending_vblank_event *pending_event;
34 bool enabled; 34 bool enabled;
35}; 35};
@@ -37,14 +37,14 @@ struct sti_mixer {
37const char *sti_mixer_to_str(struct sti_mixer *mixer); 37const char *sti_mixer_to_str(struct sti_mixer *mixer);
38 38
39struct sti_mixer *sti_mixer_create(struct device *dev, int id, 39struct sti_mixer *sti_mixer_create(struct device *dev, int id,
40 void __iomem *baseaddr); 40 void __iomem *baseaddr);
41 41
42int sti_mixer_set_layer_status(struct sti_mixer *mixer, 42int sti_mixer_set_plane_status(struct sti_mixer *mixer,
43 struct sti_layer *layer, bool status); 43 struct sti_plane *plane, bool status);
44void sti_mixer_clear_all_layers(struct sti_mixer *mixer); 44void sti_mixer_clear_all_planes(struct sti_mixer *mixer);
45int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer); 45int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane);
46int sti_mixer_active_video_area(struct sti_mixer *mixer, 46int sti_mixer_active_video_area(struct sti_mixer *mixer,
47 struct drm_display_mode *mode); 47 struct drm_display_mode *mode);
48 48
49void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable); 49void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
50 50
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 10ced6a479f4..b82a34f2a60e 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -6,7 +6,7 @@
6 6
7#include <drm/drmP.h> 7#include <drm/drmP.h>
8 8
9#include "sti_layer.h" 9#include "sti_drm_plane.h"
10#include "sti_vid.h" 10#include "sti_vid.h"
11#include "sti_vtg.h" 11#include "sti_vtg.h"
12 12
@@ -43,27 +43,20 @@
43#define VID_MPR2_BT709 0x07150545 43#define VID_MPR2_BT709 0x07150545
44#define VID_MPR3_BT709 0x00000AE8 44#define VID_MPR3_BT709 0x00000AE8
45 45
46static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare) 46int sti_vid_commit(struct sti_vid *vid, struct sti_plane *plane)
47{ 47{
48 u32 val; 48 struct drm_display_mode *mode = plane->mode;
49 u32 val, ydo, xdo, yds, xds;
49 50
50 /* Unmask */ 51 /* Unmask */
51 val = readl(vid->regs + VID_CTL); 52 val = readl(vid->regs + VID_CTL);
52 val &= ~VID_CTL_IGNORE; 53 val &= ~VID_CTL_IGNORE;
53 writel(val, vid->regs + VID_CTL); 54 writel(val, vid->regs + VID_CTL);
54 55
55 return 0; 56 ydo = sti_vtg_get_line_number(*mode, plane->dst_y);
56} 57 yds = sti_vtg_get_line_number(*mode, plane->dst_y + plane->dst_h - 1);
57 58 xdo = sti_vtg_get_pixel_number(*mode, plane->dst_x);
58static int sti_vid_commit_layer(struct sti_layer *vid) 59 xds = sti_vtg_get_pixel_number(*mode, plane->dst_x + plane->dst_w - 1);
59{
60 struct drm_display_mode *mode = vid->mode;
61 u32 ydo, xdo, yds, xds;
62
63 ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
64 yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
65 xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
66 xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
67 60
68 writel((ydo << 16) | xdo, vid->regs + VID_VPO); 61 writel((ydo << 16) | xdo, vid->regs + VID_VPO);
69 writel((yds << 16) | xds, vid->regs + VID_VPS); 62 writel((yds << 16) | xds, vid->regs + VID_VPS);
@@ -71,7 +64,7 @@ static int sti_vid_commit_layer(struct sti_layer *vid)
71 return 0; 64 return 0;
72} 65}
73 66
74static int sti_vid_disable_layer(struct sti_layer *vid) 67int sti_vid_disable(struct sti_vid *vid)
75{ 68{
76 u32 val; 69 u32 val;
77 70
@@ -83,17 +76,7 @@ static int sti_vid_disable_layer(struct sti_layer *vid)
83 return 0; 76 return 0;
84} 77}
85 78
86static const uint32_t *sti_vid_get_formats(struct sti_layer *layer) 79static void sti_vid_init(struct sti_vid *vid)
87{
88 return NULL;
89}
90
91static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
92{
93 return 0;
94}
95
96static void sti_vid_init(struct sti_layer *vid)
97{ 80{
98 /* Enable PSI, Mask layer */ 81 /* Enable PSI, Mask layer */
99 writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL); 82 writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
@@ -113,18 +96,10 @@ static void sti_vid_init(struct sti_layer *vid)
113 writel(VID_CSAT_DFLT, vid->regs + VID_CSAT); 96 writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
114} 97}
115 98
116static const struct sti_layer_funcs vid_ops = { 99struct sti_vid *sti_vid_create(struct device *dev, int id,
117 .get_formats = sti_vid_get_formats, 100 void __iomem *baseaddr)
118 .get_nb_formats = sti_vid_get_nb_formats,
119 .init = sti_vid_init,
120 .prepare = sti_vid_prepare_layer,
121 .commit = sti_vid_commit_layer,
122 .disable = sti_vid_disable_layer,
123};
124
125struct sti_layer *sti_vid_create(struct device *dev)
126{ 101{
127 struct sti_layer *vid; 102 struct sti_vid *vid;
128 103
129 vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL); 104 vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
130 if (!vid) { 105 if (!vid) {
@@ -132,7 +107,11 @@ struct sti_layer *sti_vid_create(struct device *dev)
132 return NULL; 107 return NULL;
133 } 108 }
134 109
135 vid->ops = &vid_ops; 110 vid->dev = dev;
111 vid->regs = baseaddr;
112 vid->id = id;
113
114 sti_vid_init(vid);
136 115
137 return vid; 116 return vid;
138} 117}
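sti_vid_commit() now takes the plane state explicitly and programs the VID output viewport from it: the VTG helpers turn dst_x/dst_y into pixel and line counters, and the origin and end points are packed as (y << 16) | x into VID_VPO and VID_VPS. A small sketch of that packing; vtg_line()/vtg_pixel() are hypothetical stand-ins for the real VTG conversion, which depends on the mode's blanking intervals:

	/* Sketch of the VID_VPO/VID_VPS viewport packing from sti_vid_commit();
	 * vtg_line()/vtg_pixel() are placeholder conversions. */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t vtg_line(int y)  { return y + 42; }   /* placeholder offset */
	static uint32_t vtg_pixel(int x) { return x + 264; }  /* placeholder offset */

	int main(void)
	{
		int dst_x = 0, dst_y = 0, dst_w = 1280, dst_h = 720;

		uint32_t ydo = vtg_line(dst_y);
		uint32_t yds = vtg_line(dst_y + dst_h - 1);
		uint32_t xdo = vtg_pixel(dst_x);
		uint32_t xds = vtg_pixel(dst_x + dst_w - 1);

		uint32_t vid_vpo = (ydo << 16) | xdo;  /* viewport origin */
		uint32_t vid_vps = (yds << 16) | xds;  /* viewport stop   */

		printf("VID_VPO=0x%08x VID_VPS=0x%08x\n", vid_vpo, vid_vps);
		return 0;
	}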
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 2c0aecd63294..cc680a23cc5d 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -7,6 +7,22 @@
7#ifndef _STI_VID_H_ 7#ifndef _STI_VID_H_
8#define _STI_VID_H_ 8#define _STI_VID_H_
9 9
10struct sti_layer *sti_vid_create(struct device *dev); 10/**
11 * STI VID structure
12 *
13 * @dev: driver device
14 * @regs: vid registers
15 * @id: id of the vid
16 */
17struct sti_vid {
18 struct device *dev;
19 void __iomem *regs;
20 int id;
21};
22
23int sti_vid_commit(struct sti_vid *vid, struct sti_plane *plane);
24int sti_vid_disable(struct sti_vid *vid);
25struct sti_vid *sti_vid_create(struct device *dev, int id,
26 void __iomem *baseaddr);
11 27
12#endif 28#endif