author     Dave Airlie <airlied@redhat.com>   2015-08-13 20:14:23 -0400
committer  Dave Airlie <airlied@redhat.com>   2015-08-13 20:14:23 -0400
commit     e1474e7bdf6b16db41cc12b8e7b3fefd8668f3cf (patch)
tree       6b6c5a2d79c36e7e06993d608b4ffd3c3222afd4
parent     1ce4200df02aac780fe92c92b31497891c36c3ac (diff)
parent     29d1dc62e1618192a25bd2eae9617529b9930cfc (diff)
Merge branch 'drm-sti-next-atomic-2015-08-11' of http://git.linaro.org/people/benjamin.gaignard/kernel into drm-next
This series of patches fixes minor bugs around how the driver sub-components are bound and how planes are z-ordered. The main part is about atomic support: using more of the atomic helpers allows us to simplify the code (~300 lines removed) and to have a better match between DRM concepts (planes and CRTCs) and the hardware split.

[airlied: fixed up conflict in atomic code]

* 'drm-sti-next-atomic-2015-08-11' of http://git.linaro.org/people/benjamin.gaignard/kernel:
  drm/sti: atomic crtc/plane update
  drm/sti: rename files and functions
  drm/sti: code clean up
  drm/sti: fix dynamic z-ordering
  drm: sti: fix sub-components bind
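For context on the atomic conversion described above, here is a minimal sketch of the generic DRM atomic plane-helper pattern the series moves the driver to (a plane provides atomic_update/atomic_disable callbacks and delegates the legacy entry points to the atomic helpers, as the new sti_cursor.c and sti_plane.c code in the diff below does). This is illustrative only, not code from the series; the example_plane_* names are invented, while the drm_atomic_helper_* and drm_plane_helper calls are the stock DRM helpers of that kernel generation.

    #include <drm/drmP.h>
    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_plane_helper.h>

    static void example_plane_atomic_update(struct drm_plane *plane,
                                            struct drm_plane_state *oldstate)
    {
            struct drm_plane_state *state = plane->state;

            /* Program the hardware from the new, already-committed state:
             * state->crtc, state->fb and state->crtc_x/y/w/h are valid here. */
    }

    static void example_plane_atomic_disable(struct drm_plane *plane,
                                             struct drm_plane_state *oldstate)
    {
            /* Stop scanning out from this plane. */
    }

    static const struct drm_plane_helper_funcs example_plane_helpers_funcs = {
            .atomic_update  = example_plane_atomic_update,
            .atomic_disable = example_plane_atomic_disable,
    };

    static const struct drm_plane_funcs example_plane_funcs = {
            /* Legacy entry points are routed through the atomic helpers. */
            .update_plane           = drm_atomic_helper_update_plane,
            .disable_plane          = drm_atomic_helper_disable_plane,
            .destroy                = drm_plane_cleanup,
            .reset                  = drm_atomic_helper_plane_reset,
            .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
            .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
    };

With this split, the per-plane code only has to program the hardware in atomic_update/atomic_disable; the check/swap/commit sequencing and the plane state lifecycle are handled by the core helpers, which is where most of the ~300 removed lines come from.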
-rw-r--r--Documentation/devicetree/bindings/gpu/st,stih4xx.txt72
-rw-r--r--drivers/gpu/drm/sti/Makefile7
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c141
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h12
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c (renamed from drivers/gpu/drm/sti/sti_drm_crtc.c)213
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h22
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c243
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.h5
-rw-r--r--drivers/gpu/drm/sti/sti_drm_crtc.h22
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.c251
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.h18
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c (renamed from drivers/gpu/drm/sti/sti_drm_drv.c)147
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h (renamed from drivers/gpu/drm/sti/sti_drm_drv.h)6
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c536
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.h7
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c27
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c482
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.h12
-rw-r--r--drivers/gpu/drm/sti/sti_layer.c213
-rw-r--r--drivers/gpu/drm/sti/sti_layer.h131
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c72
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h27
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c122
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h71
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c54
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c72
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h19
27 files changed, 1346 insertions(+), 1658 deletions(-)
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
index 6b1d75f1a529..a36dfce0032e 100644
--- a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -52,10 +52,9 @@ STMicroelectronics stih4xx platforms
52 See ../reset/reset.txt for details. 52 See ../reset/reset.txt for details.
53 - reset-names: names of the resets listed in resets property in the same 53 - reset-names: names of the resets listed in resets property in the same
54 order. 54 order.
55 - ranges: to allow probing of subdevices
56 55
57- sti-hdmi: hdmi output block 56- sti-hdmi: hdmi output block
58 must be a child of sti-tvout 57 must be a child of sti-display-subsystem
59 Required properties: 58 Required properties:
60 - compatible: "st,stih<chip>-hdmi"; 59 - compatible: "st,stih<chip>-hdmi";
61 - reg: Physical base address of the IP registers and length of memory mapped region. 60 - reg: Physical base address of the IP registers and length of memory mapped region.
@@ -72,7 +71,7 @@ STMicroelectronics stih4xx platforms
72 71
73sti-hda: 72sti-hda:
74 Required properties: 73 Required properties:
75 must be a child of sti-tvout 74 must be a child of sti-display-subsystem
76 - compatible: "st,stih<chip>-hda" 75 - compatible: "st,stih<chip>-hda"
77 - reg: Physical base address of the IP registers and length of memory mapped region. 76 - reg: Physical base address of the IP registers and length of memory mapped region.
78 - reg-names: names of the mapped memory regions listed in regs property in 77 - reg-names: names of the mapped memory regions listed in regs property in
@@ -85,7 +84,7 @@ sti-hda:
85 84
86sti-dvo: 85sti-dvo:
87 Required properties: 86 Required properties:
88 must be a child of sti-tvout 87 must be a child of sti-display-subsystem
89 - compatible: "st,stih<chip>-dvo" 88 - compatible: "st,stih<chip>-dvo"
90 - reg: Physical base address of the IP registers and length of memory mapped region. 89 - reg: Physical base address of the IP registers and length of memory mapped region.
91 - reg-names: names of the mapped memory regions listed in regs property in 90 - reg-names: names of the mapped memory regions listed in regs property in
@@ -195,38 +194,37 @@ Example:
195 reg-names = "tvout-reg", "hda-reg", "syscfg"; 194 reg-names = "tvout-reg", "hda-reg", "syscfg";
196 reset-names = "tvout"; 195 reset-names = "tvout";
197 resets = <&softreset STIH416_HDTVOUT_SOFTRESET>; 196 resets = <&softreset STIH416_HDTVOUT_SOFTRESET>;
198 ranges; 197 };
199 198
200 sti-hdmi@fe85c000 { 199 sti-hdmi@fe85c000 {
201 compatible = "st,stih416-hdmi"; 200 compatible = "st,stih416-hdmi";
202 reg = <0xfe85c000 0x1000>, <0xfe830000 0x10000>; 201 reg = <0xfe85c000 0x1000>, <0xfe830000 0x10000>;
203 reg-names = "hdmi-reg", "syscfg"; 202 reg-names = "hdmi-reg", "syscfg";
204 interrupts = <GIC_SPI 173 IRQ_TYPE_NONE>; 203 interrupts = <GIC_SPI 173 IRQ_TYPE_NONE>;
205 interrupt-names = "irq"; 204 interrupt-names = "irq";
206 clock-names = "pix", "tmds", "phy", "audio"; 205 clock-names = "pix", "tmds", "phy", "audio";
207 clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>; 206 clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
208 }; 207 };
209 208
210 sti-hda@fe85a000 { 209 sti-hda@fe85a000 {
211 compatible = "st,stih416-hda"; 210 compatible = "st,stih416-hda";
212 reg = <0xfe85a000 0x400>, <0xfe83085c 0x4>; 211 reg = <0xfe85a000 0x400>, <0xfe83085c 0x4>;
213 reg-names = "hda-reg", "video-dacs-ctrl"; 212 reg-names = "hda-reg", "video-dacs-ctrl";
214 clock-names = "pix", "hddac"; 213 clock-names = "pix", "hddac";
215 clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>; 214 clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
216 }; 215 };
217 216
218 sti-dvo@8d00400 { 217 sti-dvo@8d00400 {
219 compatible = "st,stih407-dvo"; 218 compatible = "st,stih407-dvo";
220 reg = <0x8d00400 0x200>; 219 reg = <0x8d00400 0x200>;
221 reg-names = "dvo-reg"; 220 reg-names = "dvo-reg";
222 clock-names = "dvo_pix", "dvo", 221 clock-names = "dvo_pix", "dvo",
223 "main_parent", "aux_parent"; 222 "main_parent", "aux_parent";
224 clocks = <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>, 223 clocks = <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>,
225 <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>; 224 <&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>;
226 pinctrl-names = "default"; 225 pinctrl-names = "default";
227 pinctrl-0 = <&pinctrl_dvo>; 226 pinctrl-0 = <&pinctrl_dvo>;
228 sti,panel = <&panel_dvo>; 227 sti,panel = <&panel_dvo>;
229 };
230 }; 228 };
231 229
232 sti-hqvdp@9c000000 { 230 sti-hqvdp@9c000000 {
@@ -237,7 +235,7 @@ Example:
237 reset-names = "hqvdp"; 235 reset-names = "hqvdp";
238 resets = <&softreset STIH407_HDQVDP_SOFTRESET>; 236 resets = <&softreset STIH407_HDQVDP_SOFTRESET>;
239 st,vtg = <&vtg_main>; 237 st,vtg = <&vtg_main>;
240 }; 238 };
241 }; 239 };
242 ... 240 ...
243}; 241};
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index f0f1e4ee2d92..e27490b492a5 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -1,12 +1,11 @@
1sticompositor-y := \ 1sticompositor-y := \
2 sti_layer.o \
3 sti_mixer.o \ 2 sti_mixer.o \
4 sti_gdp.o \ 3 sti_gdp.o \
5 sti_vid.o \ 4 sti_vid.o \
6 sti_cursor.o \ 5 sti_cursor.o \
7 sti_compositor.o \ 6 sti_compositor.o \
8 sti_drm_crtc.o \ 7 sti_crtc.o \
9 sti_drm_plane.o 8 sti_plane.o
10 9
11stihdmi-y := sti_hdmi.o \ 10stihdmi-y := sti_hdmi.o \
12 sti_hdmi_tx3g0c55phy.o \ 11 sti_hdmi_tx3g0c55phy.o \
@@ -24,4 +23,4 @@ obj-$(CONFIG_DRM_STI) = \
24 sticompositor.o \ 23 sticompositor.o \
25 sti_hqvdp.o \ 24 sti_hqvdp.o \
26 stidvo.o \ 25 stidvo.o \
27 sti_drm_drv.o 26 sti_drv.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 43215d3020fb..c652627b1bca 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -14,10 +14,12 @@
14#include <drm/drmP.h> 14#include <drm/drmP.h>
15 15
16#include "sti_compositor.h" 16#include "sti_compositor.h"
17#include "sti_drm_crtc.h" 17#include "sti_crtc.h"
18#include "sti_drm_drv.h" 18#include "sti_cursor.h"
19#include "sti_drm_plane.h" 19#include "sti_drv.h"
20#include "sti_gdp.h" 20#include "sti_gdp.h"
21#include "sti_plane.h"
22#include "sti_vid.h"
21#include "sti_vtg.h" 23#include "sti_vtg.h"
22 24
23/* 25/*
@@ -31,7 +33,7 @@ struct sti_compositor_data stih407_compositor_data = {
31 {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200}, 33 {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
32 {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300}, 34 {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
33 {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400}, 35 {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
34 {STI_VID_SUBDEV, (int)STI_VID_0, 0x700}, 36 {STI_VID_SUBDEV, (int)STI_HQVDP_0, 0x700},
35 {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}, 37 {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
36 {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00}, 38 {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
37 }, 39 },
@@ -53,14 +55,29 @@ struct sti_compositor_data stih416_compositor_data = {
53 }, 55 },
54}; 56};
55 57
56static int sti_compositor_init_subdev(struct sti_compositor *compo, 58static int sti_compositor_bind(struct device *dev,
57 struct sti_compositor_subdev_descriptor *desc, 59 struct device *master,
58 unsigned int array_size) 60 void *data)
59{ 61{
60 unsigned int i, mixer_id = 0, layer_id = 0; 62 struct sti_compositor *compo = dev_get_drvdata(dev);
63 struct drm_device *drm_dev = data;
64 unsigned int i, mixer_id = 0, vid_id = 0, crtc_id = 0;
65 struct sti_private *dev_priv = drm_dev->dev_private;
66 struct drm_plane *cursor = NULL;
67 struct drm_plane *primary = NULL;
68 struct sti_compositor_subdev_descriptor *desc = compo->data.subdev_desc;
69 unsigned int array_size = compo->data.nb_subdev;
70
71 dev_priv->compo = compo;
61 72
73 /* Register mixer subdev and video subdev first */
62 for (i = 0; i < array_size; i++) { 74 for (i = 0; i < array_size; i++) {
63 switch (desc[i].type) { 75 switch (desc[i].type) {
76 case STI_VID_SUBDEV:
77 compo->vid[vid_id++] =
78 sti_vid_create(compo->dev, desc[i].id,
79 compo->regs + desc[i].offset);
80 break;
64 case STI_MIXER_MAIN_SUBDEV: 81 case STI_MIXER_MAIN_SUBDEV:
65 case STI_MIXER_AUX_SUBDEV: 82 case STI_MIXER_AUX_SUBDEV:
66 compo->mixer[mixer_id++] = 83 compo->mixer[mixer_id++] =
@@ -68,83 +85,68 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo,
68 compo->regs + desc[i].offset); 85 compo->regs + desc[i].offset);
69 break; 86 break;
70 case STI_GPD_SUBDEV: 87 case STI_GPD_SUBDEV:
71 case STI_VID_SUBDEV:
72 case STI_CURSOR_SUBDEV: 88 case STI_CURSOR_SUBDEV:
73 compo->layer[layer_id++] = 89 /* Nothing to do, wait for the second round */
74 sti_layer_create(compo->dev, desc[i].id,
75 compo->regs + desc[i].offset);
76 break; 90 break;
77 default: 91 default:
78 DRM_ERROR("Unknow subdev compoment type\n"); 92 DRM_ERROR("Unknow subdev compoment type\n");
79 return 1; 93 return 1;
80 } 94 }
81
82 } 95 }
83 compo->nb_mixers = mixer_id;
84 compo->nb_layers = layer_id;
85
86 return 0;
87}
88
89static int sti_compositor_bind(struct device *dev, struct device *master,
90 void *data)
91{
92 struct sti_compositor *compo = dev_get_drvdata(dev);
93 struct drm_device *drm_dev = data;
94 unsigned int i, crtc = 0, plane = 0;
95 struct sti_drm_private *dev_priv = drm_dev->dev_private;
96 struct drm_plane *cursor = NULL;
97 struct drm_plane *primary = NULL;
98 96
99 dev_priv->compo = compo; 97 /* Register the other subdevs, create crtc and planes */
100 98 for (i = 0; i < array_size; i++) {
101 for (i = 0; i < compo->nb_layers; i++) { 99 enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
102 if (compo->layer[i]) {
103 enum sti_layer_desc desc = compo->layer[i]->desc;
104 enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
105 enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
106 100
107 if (crtc < compo->nb_mixers) 101 if (crtc_id < mixer_id)
108 plane_type = DRM_PLANE_TYPE_PRIMARY; 102 plane_type = DRM_PLANE_TYPE_PRIMARY;
109 103
110 switch (type) { 104 switch (desc[i].type) {
111 case STI_CUR: 105 case STI_MIXER_MAIN_SUBDEV:
112 cursor = sti_drm_plane_init(drm_dev, 106 case STI_MIXER_AUX_SUBDEV:
113 compo->layer[i], 107 case STI_VID_SUBDEV:
114 1, DRM_PLANE_TYPE_CURSOR); 108 /* Nothing to do, already done at the first round */
115 break; 109 break;
116 case STI_GDP: 110 case STI_CURSOR_SUBDEV:
117 case STI_VID: 111 cursor = sti_cursor_create(drm_dev, compo->dev,
118 primary = sti_drm_plane_init(drm_dev, 112 desc[i].id,
119 compo->layer[i], 113 compo->regs + desc[i].offset,
120 (1 << compo->nb_mixers) - 1, 114 1);
121 plane_type); 115 if (!cursor) {
122 plane++; 116 DRM_ERROR("Can't create CURSOR plane\n");
123 break; 117 break;
124 case STI_BCK: 118 }
125 case STI_VDP: 119 break;
120 case STI_GPD_SUBDEV:
121 primary = sti_gdp_create(drm_dev, compo->dev,
122 desc[i].id,
123 compo->regs + desc[i].offset,
124 (1 << mixer_id) - 1,
125 plane_type);
126 if (!primary) {
127 DRM_ERROR("Can't create GDP plane\n");
126 break; 128 break;
127 } 129 }
130 break;
131 default:
132 DRM_ERROR("Unknown subdev compoment type\n");
133 return 1;
134 }
128 135
129 /* The first planes are reserved for primary planes*/ 136 /* The first planes are reserved for primary planes*/
130 if (crtc < compo->nb_mixers && primary) { 137 if (crtc_id < mixer_id && primary) {
131 sti_drm_crtc_init(drm_dev, compo->mixer[crtc], 138 sti_crtc_init(drm_dev, compo->mixer[crtc_id],
132 primary, cursor); 139 primary, cursor);
133 crtc++; 140 crtc_id++;
134 cursor = NULL; 141 cursor = NULL;
135 primary = NULL; 142 primary = NULL;
136 }
137 } 143 }
138 } 144 }
139 145
140 drm_vblank_init(drm_dev, crtc); 146 drm_vblank_init(drm_dev, crtc_id);
141 /* Allow usage of vblank without having to call drm_irq_install */ 147 /* Allow usage of vblank without having to call drm_irq_install */
142 drm_dev->irq_enabled = 1; 148 drm_dev->irq_enabled = 1;
143 149
144 DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
145 crtc, plane);
146 DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
147
148 return 0; 150 return 0;
149} 151}
150 152
@@ -179,7 +181,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
179 struct device_node *vtg_np; 181 struct device_node *vtg_np;
180 struct sti_compositor *compo; 182 struct sti_compositor *compo;
181 struct resource *res; 183 struct resource *res;
182 int err;
183 184
184 compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL); 185 compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
185 if (!compo) { 186 if (!compo) {
@@ -187,7 +188,7 @@ static int sti_compositor_probe(struct platform_device *pdev)
187 return -ENOMEM; 188 return -ENOMEM;
188 } 189 }
189 compo->dev = dev; 190 compo->dev = dev;
190 compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb; 191 compo->vtg_vblank_nb.notifier_call = sti_crtc_vblank_cb;
191 192
192 /* populate data structure depending on compatibility */ 193 /* populate data structure depending on compatibility */
193 BUG_ON(!of_match_node(compositor_of_match, np)->data); 194 BUG_ON(!of_match_node(compositor_of_match, np)->data);
@@ -251,12 +252,6 @@ static int sti_compositor_probe(struct platform_device *pdev)
251 if (vtg_np) 252 if (vtg_np)
252 compo->vtg_aux = of_vtg_find(vtg_np); 253 compo->vtg_aux = of_vtg_find(vtg_np);
253 254
254 /* Initialize compositor subdevices */
255 err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
256 compo->data.nb_subdev);
257 if (err)
258 return err;
259
260 platform_set_drvdata(pdev, compo); 255 platform_set_drvdata(pdev, compo);
261 256
262 return component_add(&pdev->dev, &sti_compositor_ops); 257 return component_add(&pdev->dev, &sti_compositor_ops);
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 019eb44c62cc..1a4a73dab11e 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -12,13 +12,13 @@
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14 14
15#include "sti_layer.h"
16#include "sti_mixer.h" 15#include "sti_mixer.h"
16#include "sti_plane.h"
17 17
18#define WAIT_NEXT_VSYNC_MS 50 /*ms*/ 18#define WAIT_NEXT_VSYNC_MS 50 /*ms*/
19 19
20#define STI_MAX_LAYER 8
21#define STI_MAX_MIXER 2 20#define STI_MAX_MIXER 2
21#define STI_MAX_VID 1
22 22
23enum sti_compositor_subdev_type { 23enum sti_compositor_subdev_type {
24 STI_MIXER_MAIN_SUBDEV, 24 STI_MIXER_MAIN_SUBDEV,
@@ -59,11 +59,9 @@ struct sti_compositor_data {
59 * @rst_main: reset control of the main path 59 * @rst_main: reset control of the main path
60 * @rst_aux: reset control of the aux path 60 * @rst_aux: reset control of the aux path
61 * @mixer: array of mixers 61 * @mixer: array of mixers
62 * @vid: array of vids
62 * @vtg_main: vtg for main data path 63 * @vtg_main: vtg for main data path
63 * @vtg_aux: vtg for auxillary data path 64 * @vtg_aux: vtg for auxillary data path
64 * @layer: array of layers
65 * @nb_mixers: number of mixers for this compositor
66 * @nb_layers: number of layers (GDP,VID,...) for this compositor
67 * @vtg_vblank_nb: callback for VTG VSYNC notification 65 * @vtg_vblank_nb: callback for VTG VSYNC notification
68 */ 66 */
69struct sti_compositor { 67struct sti_compositor {
@@ -77,11 +75,9 @@ struct sti_compositor {
77 struct reset_control *rst_main; 75 struct reset_control *rst_main;
78 struct reset_control *rst_aux; 76 struct reset_control *rst_aux;
79 struct sti_mixer *mixer[STI_MAX_MIXER]; 77 struct sti_mixer *mixer[STI_MAX_MIXER];
78 struct sti_vid *vid[STI_MAX_VID];
80 struct sti_vtg *vtg_main; 79 struct sti_vtg *vtg_main;
81 struct sti_vtg *vtg_aux; 80 struct sti_vtg *vtg_aux;
82 struct sti_layer *layer[STI_MAX_LAYER];
83 int nb_mixers;
84 int nb_layers;
85 struct notifier_block vtg_vblank_nb; 81 struct notifier_block vtg_vblank_nb;
86}; 82};
87 83
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 26e63bf14efe..018ffc970e96 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -15,22 +15,20 @@
15#include <drm/drm_plane_helper.h> 15#include <drm/drm_plane_helper.h>
16 16
17#include "sti_compositor.h" 17#include "sti_compositor.h"
18#include "sti_drm_drv.h" 18#include "sti_crtc.h"
19#include "sti_drm_crtc.h" 19#include "sti_drv.h"
20#include "sti_vid.h"
20#include "sti_vtg.h" 21#include "sti_vtg.h"
21 22
22static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode) 23static void sti_crtc_enable(struct drm_crtc *crtc)
23{
24 DRM_DEBUG_KMS("\n");
25}
26
27static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
28{ 24{
29 struct sti_mixer *mixer = to_sti_mixer(crtc); 25 struct sti_mixer *mixer = to_sti_mixer(crtc);
30 struct device *dev = mixer->dev; 26 struct device *dev = mixer->dev;
31 struct sti_compositor *compo = dev_get_drvdata(dev); 27 struct sti_compositor *compo = dev_get_drvdata(dev);
32 28
33 mixer->enabled = true; 29 DRM_DEBUG_DRIVER("\n");
30
31 mixer->status = STI_MIXER_READY;
34 32
35 /* Prepare and enable the compo IP clock */ 33 /* Prepare and enable the compo IP clock */
36 if (mixer->id == STI_MIXER_MAIN) { 34 if (mixer->id == STI_MIXER_MAIN) {
@@ -41,45 +39,28 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
41 DRM_INFO("Failed to prepare/enable compo_aux clk\n"); 39 DRM_INFO("Failed to prepare/enable compo_aux clk\n");
42 } 40 }
43 41
44 sti_mixer_clear_all_layers(mixer); 42 drm_crtc_vblank_on(crtc);
45} 43}
46 44
47static void sti_drm_crtc_commit(struct drm_crtc *crtc) 45static void sti_crtc_disabling(struct drm_crtc *crtc)
48{ 46{
49 struct sti_mixer *mixer = to_sti_mixer(crtc); 47 struct sti_mixer *mixer = to_sti_mixer(crtc);
50 struct device *dev = mixer->dev;
51 struct sti_compositor *compo = dev_get_drvdata(dev);
52 struct sti_layer *layer;
53
54 if ((!mixer || !compo)) {
55 DRM_ERROR("Can not find mixer or compositor)\n");
56 return;
57 }
58 48
59 /* get GDP which is reserved to the CRTC FB */ 49 DRM_DEBUG_DRIVER("\n");
60 layer = to_sti_layer(crtc->primary);
61 if (layer)
62 sti_layer_commit(layer);
63 else
64 DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
65
66 /* Enable layer on mixer */
67 if (sti_mixer_set_layer_status(mixer, layer, true))
68 DRM_ERROR("Can not enable layer at mixer\n");
69 50
70 drm_crtc_vblank_on(crtc); 51 mixer->status = STI_MIXER_DISABLING;
71} 52}
72 53
73static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc, 54static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
74 const struct drm_display_mode *mode, 55 const struct drm_display_mode *mode,
75 struct drm_display_mode *adjusted_mode) 56 struct drm_display_mode *adjusted_mode)
76{ 57{
77 /* accept the provided drm_display_mode, do not fix it up */ 58 /* accept the provided drm_display_mode, do not fix it up */
78 return true; 59 return true;
79} 60}
80 61
81static int 62static int
82sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) 63sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
83{ 64{
84 struct sti_mixer *mixer = to_sti_mixer(crtc); 65 struct sti_mixer *mixer = to_sti_mixer(crtc);
85 struct device *dev = mixer->dev; 66 struct device *dev = mixer->dev;
@@ -122,22 +103,19 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
122 103
123 res = sti_mixer_active_video_area(mixer, &crtc->mode); 104 res = sti_mixer_active_video_area(mixer, &crtc->mode);
124 if (res) { 105 if (res) {
125 DRM_ERROR("Can not set active video area\n"); 106 DRM_ERROR("Can't set active video area\n");
126 return -EINVAL; 107 return -EINVAL;
127 } 108 }
128 109
129 return res; 110 return res;
130} 111}
131 112
132static void sti_drm_crtc_disable(struct drm_crtc *crtc) 113static void sti_crtc_disable(struct drm_crtc *crtc)
133{ 114{
134 struct sti_mixer *mixer = to_sti_mixer(crtc); 115 struct sti_mixer *mixer = to_sti_mixer(crtc);
135 struct device *dev = mixer->dev; 116 struct device *dev = mixer->dev;
136 struct sti_compositor *compo = dev_get_drvdata(dev); 117 struct sti_compositor *compo = dev_get_drvdata(dev);
137 118
138 if (!mixer->enabled)
139 return;
140
141 DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer)); 119 DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
142 120
143 /* Disable Background */ 121 /* Disable Background */
@@ -154,18 +132,18 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
154 clk_disable_unprepare(compo->clk_compo_aux); 132 clk_disable_unprepare(compo->clk_compo_aux);
155 } 133 }
156 134
157 mixer->enabled = false; 135 mixer->status = STI_MIXER_DISABLED;
158} 136}
159 137
160static void 138static void
161sti_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) 139sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
162{ 140{
163 sti_drm_crtc_prepare(crtc); 141 sti_crtc_enable(crtc);
164 sti_drm_crtc_mode_set(crtc, &crtc->state->adjusted_mode); 142 sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
165} 143}
166 144
167static void sti_drm_atomic_begin(struct drm_crtc *crtc, 145static void sti_crtc_atomic_begin(struct drm_crtc *crtc,
168 struct drm_crtc_state *old_crtc_state) 146 struct drm_crtc_state *old_crtc_state)
169{ 147{
170 struct sti_mixer *mixer = to_sti_mixer(crtc); 148 struct sti_mixer *mixer = to_sti_mixer(crtc);
171 149
@@ -179,47 +157,109 @@ static void sti_drm_atomic_begin(struct drm_crtc *crtc,
179 } 157 }
180} 158}
181 159
182static void sti_drm_atomic_flush(struct drm_crtc *crtc, 160static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
183 struct drm_crtc_state *old_crtc_state) 161 struct drm_crtc_state *old_crtc_state)
184{ 162{
163 struct drm_device *drm_dev = crtc->dev;
164 struct sti_mixer *mixer = to_sti_mixer(crtc);
165 struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
166 struct drm_plane *p;
167
168 DRM_DEBUG_DRIVER("\n");
169
170 /* perform plane actions */
171 list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
172 struct sti_plane *plane = to_sti_plane(p);
173
174 switch (plane->status) {
175 case STI_PLANE_UPDATED:
176 /* update planes tag as updated */
177 DRM_DEBUG_DRIVER("update plane %s\n",
178 sti_plane_to_str(plane));
179
180 if (sti_mixer_set_plane_depth(mixer, plane)) {
181 DRM_ERROR("Cannot set plane %s depth\n",
182 sti_plane_to_str(plane));
183 break;
184 }
185
186 if (sti_mixer_set_plane_status(mixer, plane, true)) {
187 DRM_ERROR("Cannot enable plane %s at mixer\n",
188 sti_plane_to_str(plane));
189 break;
190 }
191
192 /* if plane is HQVDP_0 then commit the vid[0] */
193 if (plane->desc == STI_HQVDP_0)
194 sti_vid_commit(compo->vid[0], p->state);
195
196 plane->status = STI_PLANE_READY;
197
198 break;
199 case STI_PLANE_DISABLING:
200 /* disabling sequence for planes tag as disabling */
201 DRM_DEBUG_DRIVER("disable plane %s from mixer\n",
202 sti_plane_to_str(plane));
203
204 if (sti_mixer_set_plane_status(mixer, plane, false)) {
205 DRM_ERROR("Cannot disable plane %s at mixer\n",
206 sti_plane_to_str(plane));
207 continue;
208 }
209
210 if (plane->desc == STI_CURSOR)
211 /* tag plane status for disabled */
212 plane->status = STI_PLANE_DISABLED;
213 else
214 /* tag plane status for flushing */
215 plane->status = STI_PLANE_FLUSHING;
216
217 /* if plane is HQVDP_0 then disable the vid[0] */
218 if (plane->desc == STI_HQVDP_0)
219 sti_vid_disable(compo->vid[0]);
220
221 break;
222 default:
223 /* Other status case are not handled */
224 break;
225 }
226 }
185} 227}
186 228
187static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { 229static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
188 .dpms = sti_drm_crtc_dpms, 230 .enable = sti_crtc_enable,
189 .prepare = sti_drm_crtc_prepare, 231 .disable = sti_crtc_disabling,
190 .commit = sti_drm_crtc_commit, 232 .mode_fixup = sti_crtc_mode_fixup,
191 .mode_fixup = sti_drm_crtc_mode_fixup,
192 .mode_set = drm_helper_crtc_mode_set, 233 .mode_set = drm_helper_crtc_mode_set,
193 .mode_set_nofb = sti_drm_crtc_mode_set_nofb, 234 .mode_set_nofb = sti_crtc_mode_set_nofb,
194 .mode_set_base = drm_helper_crtc_mode_set_base, 235 .mode_set_base = drm_helper_crtc_mode_set_base,
195 .disable = sti_drm_crtc_disable, 236 .atomic_begin = sti_crtc_atomic_begin,
196 .atomic_begin = sti_drm_atomic_begin, 237 .atomic_flush = sti_crtc_atomic_flush,
197 .atomic_flush = sti_drm_atomic_flush,
198}; 238};
199 239
200static void sti_drm_crtc_destroy(struct drm_crtc *crtc) 240static void sti_crtc_destroy(struct drm_crtc *crtc)
201{ 241{
202 DRM_DEBUG_KMS("\n"); 242 DRM_DEBUG_KMS("\n");
203 drm_crtc_cleanup(crtc); 243 drm_crtc_cleanup(crtc);
204} 244}
205 245
206static int sti_drm_crtc_set_property(struct drm_crtc *crtc, 246static int sti_crtc_set_property(struct drm_crtc *crtc,
207 struct drm_property *property, 247 struct drm_property *property,
208 uint64_t val) 248 uint64_t val)
209{ 249{
210 DRM_DEBUG_KMS("\n"); 250 DRM_DEBUG_KMS("\n");
211 return 0; 251 return 0;
212} 252}
213 253
214int sti_drm_crtc_vblank_cb(struct notifier_block *nb, 254int sti_crtc_vblank_cb(struct notifier_block *nb,
215 unsigned long event, void *data) 255 unsigned long event, void *data)
216{ 256{
217 struct drm_device *drm_dev; 257 struct drm_device *drm_dev;
218 struct sti_compositor *compo = 258 struct sti_compositor *compo =
219 container_of(nb, struct sti_compositor, vtg_vblank_nb); 259 container_of(nb, struct sti_compositor, vtg_vblank_nb);
220 int *crtc = data; 260 int *crtc = data;
221 unsigned long flags; 261 unsigned long flags;
222 struct sti_drm_private *priv; 262 struct sti_private *priv;
223 263
224 drm_dev = compo->mixer[*crtc]->drm_crtc.dev; 264 drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
225 priv = drm_dev->dev_private; 265 priv = drm_dev->dev_private;
@@ -235,21 +275,38 @@ int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
235 spin_lock_irqsave(&drm_dev->event_lock, flags); 275 spin_lock_irqsave(&drm_dev->event_lock, flags);
236 if (compo->mixer[*crtc]->pending_event) { 276 if (compo->mixer[*crtc]->pending_event) {
237 drm_send_vblank_event(drm_dev, -1, 277 drm_send_vblank_event(drm_dev, -1,
238 compo->mixer[*crtc]->pending_event); 278 compo->mixer[*crtc]->pending_event);
239 drm_vblank_put(drm_dev, *crtc); 279 drm_vblank_put(drm_dev, *crtc);
240 compo->mixer[*crtc]->pending_event = NULL; 280 compo->mixer[*crtc]->pending_event = NULL;
241 } 281 }
242 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 282 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
243 283
284 if (compo->mixer[*crtc]->status == STI_MIXER_DISABLING) {
285 struct drm_plane *p;
286
287 /* Disable mixer only if all overlay planes (GDP and VDP)
288 * are disabled */
289 list_for_each_entry(p, &drm_dev->mode_config.plane_list, head) {
290 struct sti_plane *plane = to_sti_plane(p);
291
292 if ((plane->desc & STI_PLANE_TYPE_MASK) <= STI_VDP)
293 if (plane->status != STI_PLANE_DISABLED)
294 return 0;
295 }
296 sti_crtc_disable(&compo->mixer[*crtc]->drm_crtc);
297 }
298
244 return 0; 299 return 0;
245} 300}
246 301
247int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc) 302int sti_crtc_enable_vblank(struct drm_device *dev, int crtc)
248{ 303{
249 struct sti_drm_private *dev_priv = dev->dev_private; 304 struct sti_private *dev_priv = dev->dev_private;
250 struct sti_compositor *compo = dev_priv->compo; 305 struct sti_compositor *compo = dev_priv->compo;
251 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; 306 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
252 307
308 DRM_DEBUG_DRIVER("\n");
309
253 if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ? 310 if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
254 compo->vtg_main : compo->vtg_aux, 311 compo->vtg_main : compo->vtg_aux,
255 vtg_vblank_nb, crtc)) { 312 vtg_vblank_nb, crtc)) {
@@ -259,11 +316,11 @@ int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
259 316
260 return 0; 317 return 0;
261} 318}
262EXPORT_SYMBOL(sti_drm_crtc_enable_vblank); 319EXPORT_SYMBOL(sti_crtc_enable_vblank);
263 320
264void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc) 321void sti_crtc_disable_vblank(struct drm_device *drm_dev, int crtc)
265{ 322{
266 struct sti_drm_private *priv = dev->dev_private; 323 struct sti_private *priv = drm_dev->dev_private;
267 struct sti_compositor *compo = priv->compo; 324 struct sti_compositor *compo = priv->compo;
268 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; 325 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
269 326
@@ -275,23 +332,23 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
275 332
276 /* free the resources of the pending requests */ 333 /* free the resources of the pending requests */
277 if (compo->mixer[crtc]->pending_event) { 334 if (compo->mixer[crtc]->pending_event) {
278 drm_vblank_put(dev, crtc); 335 drm_vblank_put(drm_dev, crtc);
279 compo->mixer[crtc]->pending_event = NULL; 336 compo->mixer[crtc]->pending_event = NULL;
280 } 337 }
281} 338}
282EXPORT_SYMBOL(sti_drm_crtc_disable_vblank); 339EXPORT_SYMBOL(sti_crtc_disable_vblank);
283 340
284static struct drm_crtc_funcs sti_crtc_funcs = { 341static struct drm_crtc_funcs sti_crtc_funcs = {
285 .set_config = drm_atomic_helper_set_config, 342 .set_config = drm_atomic_helper_set_config,
286 .page_flip = drm_atomic_helper_page_flip, 343 .page_flip = drm_atomic_helper_page_flip,
287 .destroy = sti_drm_crtc_destroy, 344 .destroy = sti_crtc_destroy,
288 .set_property = sti_drm_crtc_set_property, 345 .set_property = sti_crtc_set_property,
289 .reset = drm_atomic_helper_crtc_reset, 346 .reset = drm_atomic_helper_crtc_reset,
290 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 347 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
291 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 348 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
292}; 349};
293 350
294bool sti_drm_crtc_is_main(struct drm_crtc *crtc) 351bool sti_crtc_is_main(struct drm_crtc *crtc)
295{ 352{
296 struct sti_mixer *mixer = to_sti_mixer(crtc); 353 struct sti_mixer *mixer = to_sti_mixer(crtc);
297 354
@@ -300,18 +357,18 @@ bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
300 357
301 return false; 358 return false;
302} 359}
303EXPORT_SYMBOL(sti_drm_crtc_is_main); 360EXPORT_SYMBOL(sti_crtc_is_main);
304 361
305int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, 362int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
306 struct drm_plane *primary, struct drm_plane *cursor) 363 struct drm_plane *primary, struct drm_plane *cursor)
307{ 364{
308 struct drm_crtc *crtc = &mixer->drm_crtc; 365 struct drm_crtc *crtc = &mixer->drm_crtc;
309 int res; 366 int res;
310 367
311 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 368 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
312 &sti_crtc_funcs); 369 &sti_crtc_funcs);
313 if (res) { 370 if (res) {
314 DRM_ERROR("Can not initialze CRTC\n"); 371 DRM_ERROR("Can't initialze CRTC\n");
315 return -EINVAL; 372 return -EINVAL;
316 } 373 }
317 374
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
new file mode 100644
index 000000000000..51963e6ddbe7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_CRTC_H_
8#define _STI_CRTC_H_
9
10#include <drm/drmP.h>
11
12struct sti_mixer;
13
14int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
15 struct drm_plane *primary, struct drm_plane *cursor);
16int sti_crtc_enable_vblank(struct drm_device *dev, int crtc);
17void sti_crtc_disable_vblank(struct drm_device *dev, int crtc);
18int sti_crtc_vblank_cb(struct notifier_block *nb,
19 unsigned long event, void *data);
20bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
21
22#endif
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 010eaee60bf7..dd1032195051 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -7,8 +7,14 @@
7 */ 7 */
8#include <drm/drmP.h> 8#include <drm/drmP.h>
9 9
10#include <drm/drm_atomic_helper.h>
11#include <drm/drm_fb_cma_helper.h>
12#include <drm/drm_gem_cma_helper.h>
13#include <drm/drm_plane_helper.h>
14
15#include "sti_compositor.h"
10#include "sti_cursor.h" 16#include "sti_cursor.h"
11#include "sti_layer.h" 17#include "sti_plane.h"
12#include "sti_vtg.h" 18#include "sti_vtg.h"
13 19
14/* Registers */ 20/* Registers */
@@ -42,15 +48,19 @@ struct dma_pixmap {
42/** 48/**
43 * STI Cursor structure 49 * STI Cursor structure
44 * 50 *
45 * @layer: layer structure 51 * @sti_plane: sti_plane structure
46 * @width: cursor width 52 * @dev: driver device
47 * @height: cursor height 53 * @regs: cursor registers
48 * @clut: color look up table 54 * @width: cursor width
49 * @clut_paddr: color look up table physical address 55 * @height: cursor height
50 * @pixmap: pixmap dma buffer (clut8-format cursor) 56 * @clut: color look up table
57 * @clut_paddr: color look up table physical address
58 * @pixmap: pixmap dma buffer (clut8-format cursor)
51 */ 59 */
52struct sti_cursor { 60struct sti_cursor {
53 struct sti_layer layer; 61 struct sti_plane plane;
62 struct device *dev;
63 void __iomem *regs;
54 unsigned int width; 64 unsigned int width;
55 unsigned int height; 65 unsigned int height;
56 unsigned short *clut; 66 unsigned short *clut;
@@ -62,22 +72,10 @@ static const uint32_t cursor_supported_formats[] = {
62 DRM_FORMAT_ARGB8888, 72 DRM_FORMAT_ARGB8888,
63}; 73};
64 74
65#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer) 75#define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
66
67static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
68{
69 return cursor_supported_formats;
70}
71
72static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
73{
74 return ARRAY_SIZE(cursor_supported_formats);
75}
76 76
77static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer) 77static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
78{ 78{
79 struct sti_cursor *cursor = to_sti_cursor(layer);
80 u32 *src = layer->vaddr;
81 u8 *dst = cursor->pixmap.base; 79 u8 *dst = cursor->pixmap.base;
82 unsigned int i, j; 80 unsigned int i, j;
83 u32 a, r, g, b; 81 u32 a, r, g, b;
@@ -96,127 +94,155 @@ static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
96 } 94 }
97} 95}
98 96
99static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare) 97static void sti_cursor_init(struct sti_cursor *cursor)
100{ 98{
101 struct sti_cursor *cursor = to_sti_cursor(layer); 99 unsigned short *base = cursor->clut;
102 struct drm_display_mode *mode = layer->mode; 100 unsigned int a, r, g, b;
101
102 /* Assign CLUT values, ARGB444 format */
103 for (a = 0; a < 4; a++)
104 for (r = 0; r < 4; r++)
105 for (g = 0; g < 4; g++)
106 for (b = 0; b < 4; b++)
107 *base++ = (a * 5) << 12 |
108 (r * 5) << 8 |
109 (g * 5) << 4 |
110 (b * 5);
111}
112
113static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
114 struct drm_plane_state *oldstate)
115{
116 struct drm_plane_state *state = drm_plane->state;
117 struct sti_plane *plane = to_sti_plane(drm_plane);
118 struct sti_cursor *cursor = to_sti_cursor(plane);
119 struct drm_crtc *crtc = state->crtc;
120 struct sti_mixer *mixer = to_sti_mixer(crtc);
121 struct drm_framebuffer *fb = state->fb;
122 struct drm_display_mode *mode = &crtc->mode;
123 int dst_x = state->crtc_x;
124 int dst_y = state->crtc_y;
125 int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
126 int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
127 /* src_x are in 16.16 format */
128 int src_w = state->src_w >> 16;
129 int src_h = state->src_h >> 16;
130 bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
131 struct drm_gem_cma_object *cma_obj;
103 u32 y, x; 132 u32 y, x;
104 u32 val; 133 u32 val;
105 134
106 DRM_DEBUG_DRIVER("\n"); 135 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
136 crtc->base.id, sti_mixer_to_str(mixer),
137 drm_plane->base.id, sti_plane_to_str(plane));
138 DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
107 139
108 dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer)); 140 dev_dbg(cursor->dev, "%s %s\n", __func__,
141 sti_plane_to_str(plane));
109 142
110 if (layer->src_w < STI_CURS_MIN_SIZE || 143 if (src_w < STI_CURS_MIN_SIZE ||
111 layer->src_h < STI_CURS_MIN_SIZE || 144 src_h < STI_CURS_MIN_SIZE ||
112 layer->src_w > STI_CURS_MAX_SIZE || 145 src_w > STI_CURS_MAX_SIZE ||
113 layer->src_h > STI_CURS_MAX_SIZE) { 146 src_h > STI_CURS_MAX_SIZE) {
114 DRM_ERROR("Invalid cursor size (%dx%d)\n", 147 DRM_ERROR("Invalid cursor size (%dx%d)\n",
115 layer->src_w, layer->src_h); 148 src_w, src_h);
116 return -EINVAL; 149 return;
117 } 150 }
118 151
119 /* If the cursor size has changed, re-allocated the pixmap */ 152 /* If the cursor size has changed, re-allocated the pixmap */
120 if (!cursor->pixmap.base || 153 if (!cursor->pixmap.base ||
121 (cursor->width != layer->src_w) || 154 (cursor->width != src_w) ||
122 (cursor->height != layer->src_h)) { 155 (cursor->height != src_h)) {
123 cursor->width = layer->src_w; 156 cursor->width = src_w;
124 cursor->height = layer->src_h; 157 cursor->height = src_h;
125 158
126 if (cursor->pixmap.base) 159 if (cursor->pixmap.base)
127 dma_free_writecombine(layer->dev, 160 dma_free_writecombine(cursor->dev,
128 cursor->pixmap.size, 161 cursor->pixmap.size,
129 cursor->pixmap.base, 162 cursor->pixmap.base,
130 cursor->pixmap.paddr); 163 cursor->pixmap.paddr);
131 164
132 cursor->pixmap.size = cursor->width * cursor->height; 165 cursor->pixmap.size = cursor->width * cursor->height;
133 166
134 cursor->pixmap.base = dma_alloc_writecombine(layer->dev, 167 cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
135 cursor->pixmap.size, 168 cursor->pixmap.size,
136 &cursor->pixmap.paddr, 169 &cursor->pixmap.paddr,
137 GFP_KERNEL | GFP_DMA); 170 GFP_KERNEL | GFP_DMA);
138 if (!cursor->pixmap.base) { 171 if (!cursor->pixmap.base) {
139 DRM_ERROR("Failed to allocate memory for pixmap\n"); 172 DRM_ERROR("Failed to allocate memory for pixmap\n");
140 return -ENOMEM; 173 return;
141 } 174 }
142 } 175 }
143 176
177 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
178 if (!cma_obj) {
179 DRM_ERROR("Can't get CMA GEM object for fb\n");
180 return;
181 }
182
144 /* Convert ARGB8888 to CLUT8 */ 183 /* Convert ARGB8888 to CLUT8 */
145 sti_cursor_argb8888_to_clut8(layer); 184 sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
146 185
147 /* AWS and AWE depend on the mode */ 186 /* AWS and AWE depend on the mode */
148 y = sti_vtg_get_line_number(*mode, 0); 187 y = sti_vtg_get_line_number(*mode, 0);
149 x = sti_vtg_get_pixel_number(*mode, 0); 188 x = sti_vtg_get_pixel_number(*mode, 0);
150 val = y << 16 | x; 189 val = y << 16 | x;
151 writel(val, layer->regs + CUR_AWS); 190 writel(val, cursor->regs + CUR_AWS);
152 y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1); 191 y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
153 x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1); 192 x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
154 val = y << 16 | x; 193 val = y << 16 | x;
155 writel(val, layer->regs + CUR_AWE); 194 writel(val, cursor->regs + CUR_AWE);
156 195
157 if (first_prepare) { 196 if (first_prepare) {
158 /* Set and fetch CLUT */ 197 /* Set and fetch CLUT */
159 writel(cursor->clut_paddr, layer->regs + CUR_CML); 198 writel(cursor->clut_paddr, cursor->regs + CUR_CML);
160 writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL); 199 writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
161 } 200 }
162 201
163 return 0;
164}
165
166static int sti_cursor_commit_layer(struct sti_layer *layer)
167{
168 struct sti_cursor *cursor = to_sti_cursor(layer);
169 struct drm_display_mode *mode = layer->mode;
170 u32 ydo, xdo;
171
172 dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
173
174 /* Set memory location, size, and position */ 202 /* Set memory location, size, and position */
175 writel(cursor->pixmap.paddr, layer->regs + CUR_PML); 203 writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
176 writel(cursor->width, layer->regs + CUR_PMP); 204 writel(cursor->width, cursor->regs + CUR_PMP);
177 writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE); 205 writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
178 206
179 ydo = sti_vtg_get_line_number(*mode, layer->dst_y); 207 y = sti_vtg_get_line_number(*mode, dst_y);
180 xdo = sti_vtg_get_pixel_number(*mode, layer->dst_y); 208 x = sti_vtg_get_pixel_number(*mode, dst_y);
181 writel((ydo << 16) | xdo, layer->regs + CUR_VPO); 209 writel((y << 16) | x, cursor->regs + CUR_VPO);
182 210
183 return 0; 211 plane->status = STI_PLANE_UPDATED;
184} 212}
185 213
186static int sti_cursor_disable_layer(struct sti_layer *layer) 214static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
215 struct drm_plane_state *oldstate)
187{ 216{
188 return 0; 217 struct sti_plane *plane = to_sti_plane(drm_plane);
189} 218 struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
190 219
191static void sti_cursor_init(struct sti_layer *layer) 220 if (!drm_plane->crtc) {
192{ 221 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
193 struct sti_cursor *cursor = to_sti_cursor(layer); 222 drm_plane->base.id);
194 unsigned short *base = cursor->clut; 223 return;
195 unsigned int a, r, g, b; 224 }
196 225
197 /* Assign CLUT values, ARGB444 format */ 226 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
198 for (a = 0; a < 4; a++) 227 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
199 for (r = 0; r < 4; r++) 228 drm_plane->base.id, sti_plane_to_str(plane));
200 for (g = 0; g < 4; g++) 229
201 for (b = 0; b < 4; b++) 230 plane->status = STI_PLANE_DISABLING;
202 *base++ = (a * 5) << 12 |
203 (r * 5) << 8 |
204 (g * 5) << 4 |
205 (b * 5);
206} 231}
207 232
208static const struct sti_layer_funcs cursor_ops = { 233static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
209 .get_formats = sti_cursor_get_formats, 234 .atomic_update = sti_cursor_atomic_update,
210 .get_nb_formats = sti_cursor_get_nb_formats, 235 .atomic_disable = sti_cursor_atomic_disable,
211 .init = sti_cursor_init,
212 .prepare = sti_cursor_prepare_layer,
213 .commit = sti_cursor_commit_layer,
214 .disable = sti_cursor_disable_layer,
215}; 236};
216 237
217struct sti_layer *sti_cursor_create(struct device *dev) 238struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
239 struct device *dev, int desc,
240 void __iomem *baseaddr,
241 unsigned int possible_crtcs)
218{ 242{
219 struct sti_cursor *cursor; 243 struct sti_cursor *cursor;
244 size_t size;
245 int res;
220 246
221 cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL); 247 cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
222 if (!cursor) { 248 if (!cursor) {
@@ -225,18 +251,43 @@ struct sti_layer *sti_cursor_create(struct device *dev)
225 } 251 }
226 252
227 /* Allocate clut buffer */ 253 /* Allocate clut buffer */
228 cursor->clut = dma_alloc_writecombine(dev, 254 size = 0x100 * sizeof(unsigned short);
229 0x100 * sizeof(unsigned short), 255 cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
230 &cursor->clut_paddr, 256 GFP_KERNEL | GFP_DMA);
231 GFP_KERNEL | GFP_DMA);
232 257
233 if (!cursor->clut) { 258 if (!cursor->clut) {
234 DRM_ERROR("Failed to allocate memory for cursor clut\n"); 259 DRM_ERROR("Failed to allocate memory for cursor clut\n");
235 devm_kfree(dev, cursor); 260 goto err_clut;
236 return NULL; 261 }
262
263 cursor->dev = dev;
264 cursor->regs = baseaddr;
265 cursor->plane.desc = desc;
266 cursor->plane.status = STI_PLANE_DISABLED;
267
268 sti_cursor_init(cursor);
269
270 res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
271 possible_crtcs,
272 &sti_plane_helpers_funcs,
273 cursor_supported_formats,
274 ARRAY_SIZE(cursor_supported_formats),
275 DRM_PLANE_TYPE_CURSOR);
276 if (res) {
277 DRM_ERROR("Failed to initialize universal plane\n");
278 goto err_plane;
237 } 279 }
238 280
239 cursor->layer.ops = &cursor_ops; 281 drm_plane_helper_add(&cursor->plane.drm_plane,
282 &sti_cursor_helpers_funcs);
283
284 sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
285
286 return &cursor->plane.drm_plane;
240 287
241 return (struct sti_layer *)cursor; 288err_plane:
289 dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
290err_clut:
291 devm_kfree(dev, cursor);
292 return NULL;
242} 293}
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
index 3c9827404f27..2ee5c10e8b33 100644
--- a/drivers/gpu/drm/sti/sti_cursor.h
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -7,6 +7,9 @@
7#ifndef _STI_CURSOR_H_ 7#ifndef _STI_CURSOR_H_
8#define _STI_CURSOR_H_ 8#define _STI_CURSOR_H_
9 9
10struct sti_layer *sti_cursor_create(struct device *dev); 10struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
11 struct device *dev, int desc,
12 void __iomem *baseaddr,
13 unsigned int possible_crtcs);
11 14
12#endif 15#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h
deleted file mode 100644
index caca8b14f017..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_crtc.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_DRM_CRTC_H_
8#define _STI_DRM_CRTC_H_
9
10#include <drm/drmP.h>
11
12struct sti_mixer;
13
14int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
15 struct drm_plane *primary, struct drm_plane *cursor);
16int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
17void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
18int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
19 unsigned long event, void *data);
20bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
21
22#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
deleted file mode 100644
index 64d4ed43dda3..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ /dev/null
@@ -1,251 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <drm/drmP.h>
10#include <drm/drm_atomic_helper.h>
11#include <drm/drm_plane_helper.h>
12
13#include "sti_compositor.h"
14#include "sti_drm_drv.h"
15#include "sti_drm_plane.h"
16#include "sti_vtg.h"
17
18enum sti_layer_desc sti_layer_default_zorder[] = {
19 STI_GDP_0,
20 STI_VID_0,
21 STI_GDP_1,
22 STI_VID_1,
23 STI_GDP_2,
24 STI_GDP_3,
25};
26
27/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */
28
29static int
30sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
31 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
32 unsigned int crtc_w, unsigned int crtc_h,
33 uint32_t src_x, uint32_t src_y,
34 uint32_t src_w, uint32_t src_h)
35{
36 struct sti_layer *layer = to_sti_layer(plane);
37 struct sti_mixer *mixer = to_sti_mixer(crtc);
38 int res;
39
40 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
41 crtc->base.id, sti_mixer_to_str(mixer),
42 plane->base.id, sti_layer_to_str(layer));
43 DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
44
45 res = sti_mixer_set_layer_depth(mixer, layer);
46 if (res) {
47 DRM_ERROR("Can not set layer depth\n");
48 return res;
49 }
50
51 /* src_x are in 16.16 format. */
52 res = sti_layer_prepare(layer, crtc, fb,
53 &crtc->mode, mixer->id,
54 crtc_x, crtc_y, crtc_w, crtc_h,
55 src_x >> 16, src_y >> 16,
56 src_w >> 16, src_h >> 16);
57 if (res) {
58 DRM_ERROR("Layer prepare failed\n");
59 return res;
60 }
61
62 res = sti_layer_commit(layer);
63 if (res) {
64 DRM_ERROR("Layer commit failed\n");
65 return res;
66 }
67
68 res = sti_mixer_set_layer_status(mixer, layer, true);
69 if (res) {
70 DRM_ERROR("Can not enable layer at mixer\n");
71 return res;
72 }
73
74 return 0;
75}
76
77static int sti_drm_disable_plane(struct drm_plane *plane)
78{
79 struct sti_layer *layer;
80 struct sti_mixer *mixer;
81 int lay_res, mix_res;
82
83 if (!plane->crtc) {
84 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
85 return 0;
86 }
87 layer = to_sti_layer(plane);
88 mixer = to_sti_mixer(plane->crtc);
89
90 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
91 plane->crtc->base.id, sti_mixer_to_str(mixer),
92 plane->base.id, sti_layer_to_str(layer));
93
94 /* Disable layer at mixer level */
95 mix_res = sti_mixer_set_layer_status(mixer, layer, false);
96 if (mix_res)
97 DRM_ERROR("Can not disable layer at mixer\n");
98
99 /* Wait a while to be sure that a Vsync event is received */
100 msleep(WAIT_NEXT_VSYNC_MS);
101
102 /* Then disable layer itself */
103 lay_res = sti_layer_disable(layer);
104 if (lay_res)
105 DRM_ERROR("Layer disable failed\n");
106
107 if (lay_res || mix_res)
108 return -EINVAL;
109
110 return 0;
111}
112
113static void sti_drm_plane_destroy(struct drm_plane *plane)
114{
115 DRM_DEBUG_DRIVER("\n");
116
117 drm_plane_helper_disable(plane);
118 drm_plane_cleanup(plane);
119}
120
121static int sti_drm_plane_set_property(struct drm_plane *plane,
122 struct drm_property *property,
123 uint64_t val)
124{
125 struct drm_device *dev = plane->dev;
126 struct sti_drm_private *private = dev->dev_private;
127 struct sti_layer *layer = to_sti_layer(plane);
128
129 DRM_DEBUG_DRIVER("\n");
130
131 if (property == private->plane_zorder_property) {
132 layer->zorder = val;
133 return 0;
134 }
135
136 return -EINVAL;
137}
138
139static struct drm_plane_funcs sti_drm_plane_funcs = {
140 .update_plane = drm_atomic_helper_update_plane,
141 .disable_plane = drm_atomic_helper_disable_plane,
142 .destroy = sti_drm_plane_destroy,
143 .set_property = sti_drm_plane_set_property,
144 .reset = drm_atomic_helper_plane_reset,
145 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
146 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
147};
148
149static int sti_drm_plane_prepare_fb(struct drm_plane *plane,
150 struct drm_framebuffer *fb,
151 const struct drm_plane_state *new_state)
152{
153 return 0;
154}
155
156static void sti_drm_plane_cleanup_fb(struct drm_plane *plane,
157 struct drm_framebuffer *fb,
158 const struct drm_plane_state *old_fb)
159{
160}
161
162static int sti_drm_plane_atomic_check(struct drm_plane *plane,
163 struct drm_plane_state *state)
164{
165 return 0;
166}
167
168static void sti_drm_plane_atomic_update(struct drm_plane *plane,
169 struct drm_plane_state *oldstate)
170{
171 struct drm_plane_state *state = plane->state;
172
173 sti_drm_update_plane(plane, state->crtc, state->fb,
174 state->crtc_x, state->crtc_y,
175 state->crtc_w, state->crtc_h,
176 state->src_x, state->src_y,
177 state->src_w, state->src_h);
178}
179
180static void sti_drm_plane_atomic_disable(struct drm_plane *plane,
181 struct drm_plane_state *oldstate)
182{
183 sti_drm_disable_plane(plane);
184}
185
186static const struct drm_plane_helper_funcs sti_drm_plane_helpers_funcs = {
187 .prepare_fb = sti_drm_plane_prepare_fb,
188 .cleanup_fb = sti_drm_plane_cleanup_fb,
189 .atomic_check = sti_drm_plane_atomic_check,
190 .atomic_update = sti_drm_plane_atomic_update,
191 .atomic_disable = sti_drm_plane_atomic_disable,
192};
193
194static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
195 uint64_t default_val)
196{
197 struct drm_device *dev = plane->dev;
198 struct sti_drm_private *private = dev->dev_private;
199 struct drm_property *prop;
200 struct sti_layer *layer = to_sti_layer(plane);
201
202 prop = private->plane_zorder_property;
203 if (!prop) {
204 prop = drm_property_create_range(dev, 0, "zpos", 0,
205 GAM_MIXER_NB_DEPTH_LEVEL - 1);
206 if (!prop)
207 return;
208
209 private->plane_zorder_property = prop;
210 }
211
212 drm_object_attach_property(&plane->base, prop, default_val);
213 layer->zorder = default_val;
214}
215
216struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
217 struct sti_layer *layer,
218 unsigned int possible_crtcs,
219 enum drm_plane_type type)
220{
221 int err, i;
222 uint64_t default_zorder = 0;
223
224 err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
225 &sti_drm_plane_funcs,
226 sti_layer_get_formats(layer),
227 sti_layer_get_nb_formats(layer), type);
228 if (err) {
229 DRM_ERROR("Failed to initialize plane\n");
230 return NULL;
231 }
232
233 drm_plane_helper_add(&layer->plane, &sti_drm_plane_helpers_funcs);
234
235 for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
236 if (sti_layer_default_zorder[i] == layer->desc)
237 break;
238
239 default_zorder = i + 1;
240
241 if (type == DRM_PLANE_TYPE_OVERLAY)
242 sti_drm_plane_attach_zorder_property(&layer->plane,
243 default_zorder);
244
245 DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
246 layer->plane.base.id,
247 sti_layer_to_str(layer), default_zorder);
248
249 return &layer->plane;
250}
251EXPORT_SYMBOL(sti_drm_plane_init);
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
deleted file mode 100644
index 4f191839f2a7..000000000000
--- a/drivers/gpu/drm/sti/sti_drm_plane.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_DRM_PLANE_H_
8#define _STI_DRM_PLANE_H_
9
10#include <drm/drmP.h>
11
12struct sti_layer;
13
14struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
15 struct sti_layer *layer,
16 unsigned int possible_crtcs,
17 enum drm_plane_type type);
18#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 59d558b400b3..6f4af6a8ba1b 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -18,8 +18,8 @@
18#include <drm/drm_gem_cma_helper.h> 18#include <drm/drm_gem_cma_helper.h>
19#include <drm/drm_fb_cma_helper.h> 19#include <drm/drm_fb_cma_helper.h>
20 20
21#include "sti_drm_drv.h" 21#include "sti_crtc.h"
22#include "sti_drm_crtc.h" 22#include "sti_drv.h"
23 23
24#define DRIVER_NAME "sti" 24#define DRIVER_NAME "sti"
25#define DRIVER_DESC "STMicroelectronics SoC DRM" 25#define DRIVER_DESC "STMicroelectronics SoC DRM"
@@ -30,15 +30,15 @@
30#define STI_MAX_FB_HEIGHT 4096 30#define STI_MAX_FB_HEIGHT 4096
31#define STI_MAX_FB_WIDTH 4096 31#define STI_MAX_FB_WIDTH 4096
32 32
33static void sti_drm_atomic_schedule(struct sti_drm_private *private, 33static void sti_atomic_schedule(struct sti_private *private,
34 struct drm_atomic_state *state) 34 struct drm_atomic_state *state)
35{ 35{
36 private->commit.state = state; 36 private->commit.state = state;
37 schedule_work(&private->commit.work); 37 schedule_work(&private->commit.work);
38} 38}
39 39
40static void sti_drm_atomic_complete(struct sti_drm_private *private, 40static void sti_atomic_complete(struct sti_private *private,
41 struct drm_atomic_state *state) 41 struct drm_atomic_state *state)
42{ 42{
43 struct drm_device *drm = private->drm_dev; 43 struct drm_device *drm = private->drm_dev;
44 44
@@ -68,18 +68,18 @@ static void sti_drm_atomic_complete(struct sti_drm_private *private,
68 drm_atomic_state_free(state); 68 drm_atomic_state_free(state);
69} 69}
70 70
71static void sti_drm_atomic_work(struct work_struct *work) 71static void sti_atomic_work(struct work_struct *work)
72{ 72{
73 struct sti_drm_private *private = container_of(work, 73 struct sti_private *private = container_of(work,
74 struct sti_drm_private, commit.work); 74 struct sti_private, commit.work);
75 75
76 sti_drm_atomic_complete(private, private->commit.state); 76 sti_atomic_complete(private, private->commit.state);
77} 77}
78 78
79static int sti_drm_atomic_commit(struct drm_device *drm, 79static int sti_atomic_commit(struct drm_device *drm,
80 struct drm_atomic_state *state, bool async) 80 struct drm_atomic_state *state, bool async)
81{ 81{
82 struct sti_drm_private *private = drm->dev_private; 82 struct sti_private *private = drm->dev_private;
83 int err; 83 int err;
84 84
85 err = drm_atomic_helper_prepare_planes(drm, state); 85 err = drm_atomic_helper_prepare_planes(drm, state);
@@ -99,21 +99,21 @@ static int sti_drm_atomic_commit(struct drm_device *drm,
99 drm_atomic_helper_swap_state(drm, state); 99 drm_atomic_helper_swap_state(drm, state);
100 100
101 if (async) 101 if (async)
102 sti_drm_atomic_schedule(private, state); 102 sti_atomic_schedule(private, state);
103 else 103 else
104 sti_drm_atomic_complete(private, state); 104 sti_atomic_complete(private, state);
105 105
106 mutex_unlock(&private->commit.lock); 106 mutex_unlock(&private->commit.lock);
107 return 0; 107 return 0;
108} 108}
109 109
110static struct drm_mode_config_funcs sti_drm_mode_config_funcs = { 110static struct drm_mode_config_funcs sti_mode_config_funcs = {
111 .fb_create = drm_fb_cma_create, 111 .fb_create = drm_fb_cma_create,
112 .atomic_check = drm_atomic_helper_check, 112 .atomic_check = drm_atomic_helper_check,
113 .atomic_commit = sti_drm_atomic_commit, 113 .atomic_commit = sti_atomic_commit,
114}; 114};
115 115
116static void sti_drm_mode_config_init(struct drm_device *dev) 116static void sti_mode_config_init(struct drm_device *dev)
117{ 117{
118 dev->mode_config.min_width = 0; 118 dev->mode_config.min_width = 0;
119 dev->mode_config.min_height = 0; 119 dev->mode_config.min_height = 0;
@@ -126,15 +126,15 @@ static void sti_drm_mode_config_init(struct drm_device *dev)
126 dev->mode_config.max_width = STI_MAX_FB_HEIGHT; 126 dev->mode_config.max_width = STI_MAX_FB_HEIGHT;
127 dev->mode_config.max_height = STI_MAX_FB_WIDTH; 127 dev->mode_config.max_height = STI_MAX_FB_WIDTH;
128 128
129 dev->mode_config.funcs = &sti_drm_mode_config_funcs; 129 dev->mode_config.funcs = &sti_mode_config_funcs;
130} 130}
131 131
132static int sti_drm_load(struct drm_device *dev, unsigned long flags) 132static int sti_load(struct drm_device *dev, unsigned long flags)
133{ 133{
134 struct sti_drm_private *private; 134 struct sti_private *private;
135 int ret; 135 int ret;
136 136
137 private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL); 137 private = kzalloc(sizeof(*private), GFP_KERNEL);
138 if (!private) { 138 if (!private) {
139 DRM_ERROR("Failed to allocate private\n"); 139 DRM_ERROR("Failed to allocate private\n");
140 return -ENOMEM; 140 return -ENOMEM;
@@ -143,12 +143,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
143 private->drm_dev = dev; 143 private->drm_dev = dev;
144 144
145 mutex_init(&private->commit.lock); 145 mutex_init(&private->commit.lock);
146 INIT_WORK(&private->commit.work, sti_drm_atomic_work); 146 INIT_WORK(&private->commit.work, sti_atomic_work);
147 147
148 drm_mode_config_init(dev); 148 drm_mode_config_init(dev);
149 drm_kms_helper_poll_init(dev); 149 drm_kms_helper_poll_init(dev);
150 150
151 sti_drm_mode_config_init(dev); 151 sti_mode_config_init(dev);
152 152
153 ret = component_bind_all(dev->dev, dev); 153 ret = component_bind_all(dev->dev, dev);
154 if (ret) { 154 if (ret) {
@@ -162,13 +162,13 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
162 162
163#ifdef CONFIG_DRM_STI_FBDEV 163#ifdef CONFIG_DRM_STI_FBDEV
164 drm_fbdev_cma_init(dev, 32, 164 drm_fbdev_cma_init(dev, 32,
165 dev->mode_config.num_crtc, 165 dev->mode_config.num_crtc,
166 dev->mode_config.num_connector); 166 dev->mode_config.num_connector);
167#endif 167#endif
168 return 0; 168 return 0;
169} 169}
170 170
171static const struct file_operations sti_drm_driver_fops = { 171static const struct file_operations sti_driver_fops = {
172 .owner = THIS_MODULE, 172 .owner = THIS_MODULE,
173 .open = drm_open, 173 .open = drm_open,
174 .mmap = drm_gem_cma_mmap, 174 .mmap = drm_gem_cma_mmap,
@@ -181,33 +181,33 @@ static const struct file_operations sti_drm_driver_fops = {
181 .release = drm_release, 181 .release = drm_release,
182}; 182};
183 183
184static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev, 184static struct dma_buf *sti_gem_prime_export(struct drm_device *dev,
185 struct drm_gem_object *obj, 185 struct drm_gem_object *obj,
186 int flags) 186 int flags)
187{ 187{
188 /* we want to be able to write in mmapped buffer */ 188 /* we want to be able to write in mmapped buffer */
189 flags |= O_RDWR; 189 flags |= O_RDWR;
190 return drm_gem_prime_export(dev, obj, flags); 190 return drm_gem_prime_export(dev, obj, flags);
191} 191}
192 192
193static struct drm_driver sti_drm_driver = { 193static struct drm_driver sti_driver = {
194 .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET | 194 .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
195 DRIVER_GEM | DRIVER_PRIME, 195 DRIVER_GEM | DRIVER_PRIME,
196 .load = sti_drm_load, 196 .load = sti_load,
197 .gem_free_object = drm_gem_cma_free_object, 197 .gem_free_object = drm_gem_cma_free_object,
198 .gem_vm_ops = &drm_gem_cma_vm_ops, 198 .gem_vm_ops = &drm_gem_cma_vm_ops,
199 .dumb_create = drm_gem_cma_dumb_create, 199 .dumb_create = drm_gem_cma_dumb_create,
200 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 200 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
201 .dumb_destroy = drm_gem_dumb_destroy, 201 .dumb_destroy = drm_gem_dumb_destroy,
202 .fops = &sti_drm_driver_fops, 202 .fops = &sti_driver_fops,
203 203
204 .get_vblank_counter = drm_vblank_count, 204 .get_vblank_counter = drm_vblank_count,
205 .enable_vblank = sti_drm_crtc_enable_vblank, 205 .enable_vblank = sti_crtc_enable_vblank,
206 .disable_vblank = sti_drm_crtc_disable_vblank, 206 .disable_vblank = sti_crtc_disable_vblank,
207 207
208 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 208 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
209 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 209 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
210 .gem_prime_export = sti_drm_gem_prime_export, 210 .gem_prime_export = sti_gem_prime_export,
211 .gem_prime_import = drm_gem_prime_import, 211 .gem_prime_import = drm_gem_prime_import,
212 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, 212 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
213 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, 213 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
@@ -227,30 +227,32 @@ static int compare_of(struct device *dev, void *data)
227 return dev->of_node == data; 227 return dev->of_node == data;
228} 228}
229 229
230static int sti_drm_bind(struct device *dev) 230static int sti_bind(struct device *dev)
231{ 231{
232 return drm_platform_init(&sti_drm_driver, to_platform_device(dev)); 232 return drm_platform_init(&sti_driver, to_platform_device(dev));
233} 233}
234 234
235static void sti_drm_unbind(struct device *dev) 235static void sti_unbind(struct device *dev)
236{ 236{
237 drm_put_dev(dev_get_drvdata(dev)); 237 drm_put_dev(dev_get_drvdata(dev));
238} 238}
239 239
240static const struct component_master_ops sti_drm_ops = { 240static const struct component_master_ops sti_ops = {
241 .bind = sti_drm_bind, 241 .bind = sti_bind,
242 .unbind = sti_drm_unbind, 242 .unbind = sti_unbind,
243}; 243};
244 244
245static int sti_drm_master_probe(struct platform_device *pdev) 245static int sti_platform_probe(struct platform_device *pdev)
246{ 246{
247 struct device *dev = &pdev->dev; 247 struct device *dev = &pdev->dev;
248 struct device_node *node = dev->parent->of_node; 248 struct device_node *node = dev->of_node;
249 struct device_node *child_np; 249 struct device_node *child_np;
250 struct component_match *match = NULL; 250 struct component_match *match = NULL;
251 251
252 dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); 252 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
253 253
254 of_platform_populate(node, NULL, NULL, dev);
255
254 child_np = of_get_next_available_child(node, NULL); 256 child_np = of_get_next_available_child(node, NULL);
255 257
256 while (child_np) { 258 while (child_np) {
@@ -259,68 +261,33 @@ static int sti_drm_master_probe(struct platform_device *pdev)
259 child_np = of_get_next_available_child(node, child_np); 261 child_np = of_get_next_available_child(node, child_np);
260 } 262 }
261 263
262 return component_master_add_with_match(dev, &sti_drm_ops, match); 264 return component_master_add_with_match(dev, &sti_ops, match);
263}
264
265static int sti_drm_master_remove(struct platform_device *pdev)
266{
267 component_master_del(&pdev->dev, &sti_drm_ops);
268 return 0;
269} 265}
270 266
271static struct platform_driver sti_drm_master_driver = { 267static int sti_platform_remove(struct platform_device *pdev)
272 .probe = sti_drm_master_probe,
273 .remove = sti_drm_master_remove,
274 .driver = {
275 .name = DRIVER_NAME "__master",
276 },
277};
278
279static int sti_drm_platform_probe(struct platform_device *pdev)
280{ 268{
281 struct device *dev = &pdev->dev; 269 component_master_del(&pdev->dev, &sti_ops);
282 struct device_node *node = dev->of_node;
283 struct platform_device *master;
284
285 of_platform_populate(node, NULL, NULL, dev);
286
287 platform_driver_register(&sti_drm_master_driver);
288 master = platform_device_register_resndata(dev,
289 DRIVER_NAME "__master", -1,
290 NULL, 0, NULL, 0);
291 if (IS_ERR(master))
292 return PTR_ERR(master);
293
294 platform_set_drvdata(pdev, master);
295 return 0;
296}
297
298static int sti_drm_platform_remove(struct platform_device *pdev)
299{
300 struct platform_device *master = platform_get_drvdata(pdev);
301
302 of_platform_depopulate(&pdev->dev); 270 of_platform_depopulate(&pdev->dev);
303 platform_device_unregister(master); 271
304 platform_driver_unregister(&sti_drm_master_driver);
305 return 0; 272 return 0;
306} 273}
307 274
308static const struct of_device_id sti_drm_dt_ids[] = { 275static const struct of_device_id sti_dt_ids[] = {
309 { .compatible = "st,sti-display-subsystem", }, 276 { .compatible = "st,sti-display-subsystem", },
310 { /* end node */ }, 277 { /* end node */ },
311}; 278};
312MODULE_DEVICE_TABLE(of, sti_drm_dt_ids); 279MODULE_DEVICE_TABLE(of, sti_dt_ids);
313 280
314static struct platform_driver sti_drm_platform_driver = { 281static struct platform_driver sti_platform_driver = {
315 .probe = sti_drm_platform_probe, 282 .probe = sti_platform_probe,
316 .remove = sti_drm_platform_remove, 283 .remove = sti_platform_remove,
317 .driver = { 284 .driver = {
318 .name = DRIVER_NAME, 285 .name = DRIVER_NAME,
319 .of_match_table = sti_drm_dt_ids, 286 .of_match_table = sti_dt_ids,
320 }, 287 },
321}; 288};
322 289
323module_platform_driver(sti_drm_platform_driver); 290module_platform_driver(sti_platform_driver);
324 291
325MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>"); 292MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
326MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver"); 293MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
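A condensed sketch (illustration only; the function name is an assumption) of the commit path renamed above: an asynchronous commit is deferred to the driver workqueue, where sti_atomic_work() later calls sti_atomic_complete(), while a synchronous commit completes in place:

static int example_commit(struct drm_device *drm,
			  struct drm_atomic_state *state, bool async)
{
	struct sti_private *private = drm->dev_private;
	int err;

	err = drm_atomic_helper_prepare_planes(drm, state);
	if (err)
		return err;

	mutex_lock(&private->commit.lock);
	drm_atomic_helper_swap_state(drm, state);

	if (async) {
		/* completed later by sti_atomic_work() on the workqueue */
		private->commit.state = state;
		schedule_work(&private->commit.work);
	} else {
		sti_atomic_complete(private, state);
	}

	mutex_unlock(&private->commit.lock);
	return 0;
}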
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index c413aa3ff402..9372f69e1859 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -4,8 +4,8 @@
4 * License terms: GNU General Public License (GPL), version 2 4 * License terms: GNU General Public License (GPL), version 2
5 */ 5 */
6 6
7#ifndef _STI_DRM_DRV_H_ 7#ifndef _STI_DRV_H_
8#define _STI_DRM_DRV_H_ 8#define _STI_DRV_H_
9 9
10#include <drm/drmP.h> 10#include <drm/drmP.h>
11 11
@@ -20,7 +20,7 @@ struct sti_tvout;
20 * @plane_zorder_property: z-order property for CRTC planes 20 * @plane_zorder_property: z-order property for CRTC planes
21 * @drm_dev: drm device 21 * @drm_dev: drm device
22 */ 22 */
23struct sti_drm_private { 23struct sti_private {
24 struct sti_compositor *compo; 24 struct sti_compositor *compo;
25 struct drm_property *plane_zorder_property; 25 struct drm_property *plane_zorder_property;
26 struct drm_device *drm_dev; 26 struct drm_device *drm_dev;
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 087906fd8846..9365670427ad 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -9,9 +9,12 @@
9#include <linux/clk.h> 9#include <linux/clk.h>
10#include <linux/dma-mapping.h> 10#include <linux/dma-mapping.h>
11 11
12#include <drm/drm_fb_cma_helper.h>
13#include <drm/drm_gem_cma_helper.h>
14
12#include "sti_compositor.h" 15#include "sti_compositor.h"
13#include "sti_gdp.h" 16#include "sti_gdp.h"
14#include "sti_layer.h" 17#include "sti_plane.h"
15#include "sti_vtg.h" 18#include "sti_vtg.h"
16 19
17#define ALPHASWITCH BIT(6) 20#define ALPHASWITCH BIT(6)
@@ -26,7 +29,7 @@
26#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH) 29#define GDP_XBGR8888 (GDP_RGB888_32 | BIGNOTLITTLE | ALPHASWITCH)
27#define GDP_ARGB8565 0x04 30#define GDP_ARGB8565 0x04
28#define GDP_ARGB8888 0x05 31#define GDP_ARGB8888 0x05
29#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH) 32#define GDP_ABGR8888 (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
30#define GDP_ARGB1555 0x06 33#define GDP_ARGB1555 0x06
31#define GDP_ARGB4444 0x07 34#define GDP_ARGB4444 0x07
32#define GDP_CLUT8 0x0B 35#define GDP_CLUT8 0x0B
@@ -53,8 +56,8 @@
53#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0)) 56#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
54#define GAM_GDP_SIZE_MAX 0x7FF 57#define GAM_GDP_SIZE_MAX 0x7FF
55 58
56#define GDP_NODE_NB_BANK 2 59#define GDP_NODE_NB_BANK 2
57#define GDP_NODE_PER_FIELD 2 60#define GDP_NODE_PER_FIELD 2
58 61
59struct sti_gdp_node { 62struct sti_gdp_node {
60 u32 gam_gdp_ctl; 63 u32 gam_gdp_ctl;
@@ -85,16 +88,20 @@ struct sti_gdp_node_list {
85/** 88/**
86 * STI GDP structure 89 * STI GDP structure
87 * 90 *
88 * @layer: layer structure 91 * @sti_plane: sti_plane structure
92 * @dev: driver device
93 * @regs: gdp registers
89 * @clk_pix: pixel clock for the current gdp 94 * @clk_pix: pixel clock for the current gdp
90 * @clk_main_parent: gdp parent clock if main path used 95 * @clk_main_parent: gdp parent clock if main path used
91 * @clk_aux_parent: gdp parent clock if aux path used 96 * @clk_aux_parent: gdp parent clock if aux path used
92 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification 97 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
93 * @is_curr_top: true if the current node processed is the top field 98 * @is_curr_top: true if the current node processed is the top field
94 * @node_list: array of node list 99 * @node_list: array of node list
95 */ 100 */
96struct sti_gdp { 101struct sti_gdp {
97 struct sti_layer layer; 102 struct sti_plane plane;
103 struct device *dev;
104 void __iomem *regs;
98 struct clk *clk_pix; 105 struct clk *clk_pix;
99 struct clk *clk_main_parent; 106 struct clk *clk_main_parent;
100 struct clk *clk_aux_parent; 107 struct clk *clk_aux_parent;
@@ -103,7 +110,7 @@ struct sti_gdp {
103 struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK]; 110 struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
104}; 111};
105 112
106#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer) 113#define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
107 114
108static const uint32_t gdp_supported_formats[] = { 115static const uint32_t gdp_supported_formats[] = {
109 DRM_FORMAT_XRGB8888, 116 DRM_FORMAT_XRGB8888,
@@ -120,16 +127,6 @@ static const uint32_t gdp_supported_formats[] = {
120 DRM_FORMAT_C8, 127 DRM_FORMAT_C8,
121}; 128};
122 129
123static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
124{
125 return gdp_supported_formats;
126}
127
128static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
129{
130 return ARRAY_SIZE(gdp_supported_formats);
131}
132
133static int sti_gdp_fourcc2format(int fourcc) 130static int sti_gdp_fourcc2format(int fourcc)
134{ 131{
135 switch (fourcc) { 132 switch (fourcc) {
@@ -175,20 +172,19 @@ static int sti_gdp_get_alpharange(int format)
175 172
176/** 173/**
177 * sti_gdp_get_free_nodes 174 * sti_gdp_get_free_nodes
178 * @layer: gdp layer 175 * @gdp: gdp pointer
179 * 176 *
180 * Look for a GDP node list that is not currently read by the HW. 177 * Look for a GDP node list that is not currently read by the HW.
181 * 178 *
182 * RETURNS: 179 * RETURNS:
183 * Pointer to the free GDP node list 180 * Pointer to the free GDP node list
184 */ 181 */
185static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) 182static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_gdp *gdp)
186{ 183{
187 int hw_nvn; 184 int hw_nvn;
188 struct sti_gdp *gdp = to_sti_gdp(layer);
189 unsigned int i; 185 unsigned int i;
190 186
191 hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); 187 hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
192 if (!hw_nvn) 188 if (!hw_nvn)
193 goto end; 189 goto end;
194 190
@@ -199,7 +195,7 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
199 195
200 /* in hazardous cases restart with the first node */ 196 /* in hazardous cases restart with the first node */
201 DRM_ERROR("inconsistent NVN for %s: 0x%08X\n", 197 DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
202 sti_layer_to_str(layer), hw_nvn); 198 sti_plane_to_str(&gdp->plane), hw_nvn);
203 199
204end: 200end:
205 return &gdp->node_list[0]; 201 return &gdp->node_list[0];
@@ -207,7 +203,7 @@ end:
207 203
208/** 204/**
209 * sti_gdp_get_current_nodes 205 * sti_gdp_get_current_nodes
210 * @layer: GDP layer 206 * @gdp: gdp pointer
211 * 207 *
212 * Look for GDP nodes that are currently read by the HW. 208 * Look for GDP nodes that are currently read by the HW.
213 * 209 *
@@ -215,13 +211,12 @@ end:
215 * Pointer to the current GDP node list 211 * Pointer to the current GDP node list
216 */ 212 */
217static 213static
218struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) 214struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_gdp *gdp)
219{ 215{
220 int hw_nvn; 216 int hw_nvn;
221 struct sti_gdp *gdp = to_sti_gdp(layer);
222 unsigned int i; 217 unsigned int i;
223 218
224 hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET); 219 hw_nvn = readl(gdp->regs + GAM_GDP_NVN_OFFSET);
225 if (!hw_nvn) 220 if (!hw_nvn)
226 goto end; 221 goto end;
227 222
@@ -232,205 +227,25 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
232 227
233end: 228end:
234 DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n", 229 DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
235 hw_nvn, sti_layer_to_str(layer)); 230 hw_nvn, sti_plane_to_str(&gdp->plane));
236 231
237 return NULL; 232 return NULL;
238} 233}
239 234
240/** 235/**
241 * sti_gdp_prepare_layer 236 * sti_gdp_disable
242 * @lay: gdp layer 237 * @gdp: gdp pointer
243 * @first_prepare: true if it is the first time this function is called
244 *
245 * Update the free GDP node list according to the layer properties.
246 *
247 * RETURNS:
248 * 0 on success.
249 */
250static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
251{
252 struct sti_gdp_node_list *list;
253 struct sti_gdp_node *top_field, *btm_field;
254 struct drm_display_mode *mode = layer->mode;
255 struct device *dev = layer->dev;
256 struct sti_gdp *gdp = to_sti_gdp(layer);
257 struct sti_compositor *compo = dev_get_drvdata(dev);
258 int format;
259 unsigned int depth, bpp;
260 int rate = mode->clock * 1000;
261 int res;
262 u32 ydo, xdo, yds, xds;
263
264 list = sti_gdp_get_free_nodes(layer);
265 top_field = list->top_field;
266 btm_field = list->btm_field;
267
268 dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
269 sti_layer_to_str(layer), top_field, btm_field);
270
271 /* Build the top field from layer params */
272 top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
273 top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
274 format = sti_gdp_fourcc2format(layer->format);
275 if (format == -1) {
276 DRM_ERROR("Format not supported by GDP %.4s\n",
277 (char *)&layer->format);
278 return 1;
279 }
280 top_field->gam_gdp_ctl |= format;
281 top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
282 top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
283
284 /* pixel memory location */
285 drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
286 top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
287 top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
288 top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
289
290 /* input parameters */
291 top_field->gam_gdp_pmp = layer->pitches[0];
292 top_field->gam_gdp_size =
293 clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
294 clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
295
296 /* output parameters */
297 ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
298 yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
299 xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
300 xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
301 top_field->gam_gdp_vpo = (ydo << 16) | xdo;
302 top_field->gam_gdp_vps = (yds << 16) | xds;
303
304 /* Same content and chained together */
305 memcpy(btm_field, top_field, sizeof(*btm_field));
306 top_field->gam_gdp_nvn = list->btm_field_paddr;
307 btm_field->gam_gdp_nvn = list->top_field_paddr;
308
309 /* Interlaced mode */
310 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
311 btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
312 layer->pitches[0];
313
314 if (first_prepare) {
315 /* Register gdp callback */
316 if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
317 compo->vtg_main : compo->vtg_aux,
318 &gdp->vtg_field_nb, layer->mixer_id)) {
319 DRM_ERROR("Cannot register VTG notifier\n");
320 return 1;
321 }
322
323 /* Set and enable gdp clock */
324 if (gdp->clk_pix) {
325 struct clk *clkp;
326 /* According to the mixer used, the gdp pixel clock
327 * should have a different parent clock. */
328 if (layer->mixer_id == STI_MIXER_MAIN)
329 clkp = gdp->clk_main_parent;
330 else
331 clkp = gdp->clk_aux_parent;
332
333 if (clkp)
334 clk_set_parent(gdp->clk_pix, clkp);
335
336 res = clk_set_rate(gdp->clk_pix, rate);
337 if (res < 0) {
338 DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
339 rate);
340 return 1;
341 }
342
343 if (clk_prepare_enable(gdp->clk_pix)) {
344 DRM_ERROR("Failed to prepare/enable gdp\n");
345 return 1;
346 }
347 }
348 }
349
350 return 0;
351}
352
353/**
354 * sti_gdp_commit_layer
355 * @lay: gdp layer
356 *
357 * Update the NVN field of the 'right' field of the current GDP node (being
358 * used by the HW) with the address of the updated ('free') top field GDP node.
359 * - In interlaced mode the 'right' field is the bottom field as we update
360 * frames starting from their top field
361 * - In progressive mode, we update both bottom and top fields which are
362 * equal nodes.
363 * At the next VSYNC, the updated node list will be used by the HW.
364 *
365 * RETURNS:
366 * 0 on success.
367 */
368static int sti_gdp_commit_layer(struct sti_layer *layer)
369{
370 struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
371 struct sti_gdp_node *updated_top_node = updated_list->top_field;
372 struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
373 struct sti_gdp *gdp = to_sti_gdp(layer);
374 u32 dma_updated_top = updated_list->top_field_paddr;
375 u32 dma_updated_btm = updated_list->btm_field_paddr;
376 struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
377
378 dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
379 sti_layer_to_str(layer),
380 updated_top_node, updated_btm_node);
381 dev_dbg(layer->dev, "Current NVN:0x%X\n",
382 readl(layer->regs + GAM_GDP_NVN_OFFSET));
383 dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
384 (unsigned long)layer->paddr,
385 readl(layer->regs + GAM_GDP_PML_OFFSET));
386
387 if (curr_list == NULL) {
388 /* First update or invalid node should directly write in the
389 * hw register */
390 DRM_DEBUG_DRIVER("%s first update (or invalid node)",
391 sti_layer_to_str(layer));
392
393 writel(gdp->is_curr_top == true ?
394 dma_updated_btm : dma_updated_top,
395 layer->regs + GAM_GDP_NVN_OFFSET);
396 return 0;
397 }
398
399 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
400 if (gdp->is_curr_top == true) {
401 /* Do not update in the middle of the frame, but
402 * postpone the update after the bottom field has
403 * been displayed */
404 curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
405 } else {
406 /* Direct update to avoid one frame delay */
407 writel(dma_updated_top,
408 layer->regs + GAM_GDP_NVN_OFFSET);
409 }
410 } else {
411 /* Direct update for progressive to avoid one frame delay */
412 writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
413 }
414
415 return 0;
416}
417
418/**
419 * sti_gdp_disable_layer
420 * @lay: gdp layer
421 * 238 *
422 * Disable a GDP. 239 * Disable a GDP.
423 *
424 * RETURNS:
425 * 0 on success.
426 */ 240 */
427static int sti_gdp_disable_layer(struct sti_layer *layer) 241static void sti_gdp_disable(struct sti_gdp *gdp)
428{ 242{
243 struct drm_plane *drm_plane = &gdp->plane.drm_plane;
244 struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
245 struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
429 unsigned int i; 246 unsigned int i;
430 struct sti_gdp *gdp = to_sti_gdp(layer);
431 struct sti_compositor *compo = dev_get_drvdata(layer->dev);
432 247
433 DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); 248 DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
434 249
435 /* Set the nodes as 'to be ignored on mixer' */ 250 /* Set the nodes as 'to be ignored on mixer' */
436 for (i = 0; i < GDP_NODE_NB_BANK; i++) { 251 for (i = 0; i < GDP_NODE_NB_BANK; i++) {
@@ -438,14 +253,14 @@ static int sti_gdp_disable_layer(struct sti_layer *layer)
438 gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE; 253 gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
439 } 254 }
440 255
441 if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ? 256 if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
442 compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb)) 257 compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
443 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 258 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
444 259
445 if (gdp->clk_pix) 260 if (gdp->clk_pix)
446 clk_disable_unprepare(gdp->clk_pix); 261 clk_disable_unprepare(gdp->clk_pix);
447 262
448 return 0; 263 gdp->plane.status = STI_PLANE_DISABLED;
449} 264}
450 265
451/** 266/**
@@ -464,6 +279,14 @@ int sti_gdp_field_cb(struct notifier_block *nb,
464{ 279{
465 struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb); 280 struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
466 281
282 if (gdp->plane.status == STI_PLANE_FLUSHING) {
283 /* disable needs to be synchronized on the vsync event */
284 DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
285 sti_plane_to_str(&gdp->plane));
286
287 sti_gdp_disable(gdp);
288 }
289
467 switch (event) { 290 switch (event) {
468 case VTG_TOP_FIELD_EVENT: 291 case VTG_TOP_FIELD_EVENT:
469 gdp->is_curr_top = true; 292 gdp->is_curr_top = true;
@@ -479,10 +302,9 @@ int sti_gdp_field_cb(struct notifier_block *nb,
479 return 0; 302 return 0;
480} 303}
481 304
482static void sti_gdp_init(struct sti_layer *layer) 305static void sti_gdp_init(struct sti_gdp *gdp)
483{ 306{
484 struct sti_gdp *gdp = to_sti_gdp(layer); 307 struct device_node *np = gdp->dev->of_node;
485 struct device_node *np = layer->dev->of_node;
486 dma_addr_t dma_addr; 308 dma_addr_t dma_addr;
487 void *base; 309 void *base;
488 unsigned int i, size; 310 unsigned int i, size;
@@ -490,8 +312,8 @@ static void sti_gdp_init(struct sti_layer *layer)
490 /* Allocate all the nodes within a single memory page */ 312 /* Allocate all the nodes within a single memory page */
491 size = sizeof(struct sti_gdp_node) * 313 size = sizeof(struct sti_gdp_node) *
492 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; 314 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
493 base = dma_alloc_writecombine(layer->dev, 315 base = dma_alloc_writecombine(gdp->dev,
494 size, &dma_addr, GFP_KERNEL | GFP_DMA); 316 size, &dma_addr, GFP_KERNEL | GFP_DMA);
495 317
496 if (!base) { 318 if (!base) {
497 DRM_ERROR("Failed to allocate memory for GDP node\n"); 319 DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -526,7 +348,7 @@ static void sti_gdp_init(struct sti_layer *layer)
526 /* GDP of STiH407 chip have its own pixel clock */ 348 /* GDP of STiH407 chip have its own pixel clock */
527 char *clk_name; 349 char *clk_name;
528 350
529 switch (layer->desc) { 351 switch (gdp->plane.desc) {
530 case STI_GDP_0: 352 case STI_GDP_0:
531 clk_name = "pix_gdp1"; 353 clk_name = "pix_gdp1";
532 break; 354 break;
@@ -544,32 +366,249 @@ static void sti_gdp_init(struct sti_layer *layer)
544 return; 366 return;
545 } 367 }
546 368
547 gdp->clk_pix = devm_clk_get(layer->dev, clk_name); 369 gdp->clk_pix = devm_clk_get(gdp->dev, clk_name);
548 if (IS_ERR(gdp->clk_pix)) 370 if (IS_ERR(gdp->clk_pix))
549 DRM_ERROR("Cannot get %s clock\n", clk_name); 371 DRM_ERROR("Cannot get %s clock\n", clk_name);
550 372
551 gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent"); 373 gdp->clk_main_parent = devm_clk_get(gdp->dev, "main_parent");
552 if (IS_ERR(gdp->clk_main_parent)) 374 if (IS_ERR(gdp->clk_main_parent))
553 DRM_ERROR("Cannot get main_parent clock\n"); 375 DRM_ERROR("Cannot get main_parent clock\n");
554 376
555 gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent"); 377 gdp->clk_aux_parent = devm_clk_get(gdp->dev, "aux_parent");
556 if (IS_ERR(gdp->clk_aux_parent)) 378 if (IS_ERR(gdp->clk_aux_parent))
557 DRM_ERROR("Cannot get aux_parent clock\n"); 379 DRM_ERROR("Cannot get aux_parent clock\n");
558 } 380 }
559} 381}
560 382
561static const struct sti_layer_funcs gdp_ops = { 383static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
562 .get_formats = sti_gdp_get_formats, 384 struct drm_plane_state *oldstate)
563 .get_nb_formats = sti_gdp_get_nb_formats, 385{
564 .init = sti_gdp_init, 386 struct drm_plane_state *state = drm_plane->state;
565 .prepare = sti_gdp_prepare_layer, 387 struct sti_plane *plane = to_sti_plane(drm_plane);
566 .commit = sti_gdp_commit_layer, 388 struct sti_gdp *gdp = to_sti_gdp(plane);
567 .disable = sti_gdp_disable_layer, 389 struct drm_crtc *crtc = state->crtc;
390 struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
391 struct drm_framebuffer *fb = state->fb;
392 bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
393 struct sti_mixer *mixer;
394 struct drm_display_mode *mode;
395 int dst_x, dst_y, dst_w, dst_h;
396 int src_x, src_y, src_w, src_h;
397 struct drm_gem_cma_object *cma_obj;
398 struct sti_gdp_node_list *list;
399 struct sti_gdp_node_list *curr_list;
400 struct sti_gdp_node *top_field, *btm_field;
401 u32 dma_updated_top;
402 u32 dma_updated_btm;
403 int format;
404 unsigned int depth, bpp;
405 u32 ydo, xdo, yds, xds;
406 int res;
407
408 /* Manage the case where crtc is null (disabled) */
409 if (!crtc)
410 return;
411
412 mixer = to_sti_mixer(crtc);
413 mode = &crtc->mode;
414 dst_x = state->crtc_x;
415 dst_y = state->crtc_y;
416 dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
417 dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
418 /* src_x are in 16.16 format */
419 src_x = state->src_x >> 16;
420 src_y = state->src_y >> 16;
421 src_w = state->src_w >> 16;
422 src_h = state->src_h >> 16;
423
424 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
425 crtc->base.id, sti_mixer_to_str(mixer),
426 drm_plane->base.id, sti_plane_to_str(plane));
427 DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
428 sti_plane_to_str(plane),
429 dst_w, dst_h, dst_x, dst_y,
430 src_w, src_h, src_x, src_y);
431
432 list = sti_gdp_get_free_nodes(gdp);
433 top_field = list->top_field;
434 btm_field = list->btm_field;
435
436 dev_dbg(gdp->dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
437 sti_plane_to_str(plane), top_field, btm_field);
438
439 /* build the top field */
440 top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
441 top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
442 format = sti_gdp_fourcc2format(fb->pixel_format);
443 if (format == -1) {
444 DRM_ERROR("Format not supported by GDP %.4s\n",
445 (char *)&fb->pixel_format);
446 return;
447 }
448 top_field->gam_gdp_ctl |= format;
449 top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
450 top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
451
452 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
453 if (!cma_obj) {
454 DRM_ERROR("Can't get CMA GEM object for fb\n");
455 return;
456 }
457
458 DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
459 (char *)&fb->pixel_format,
460 (unsigned long)cma_obj->paddr);
461
462 /* pixel memory location */
463 drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
464 top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0];
465 top_field->gam_gdp_pml += src_x * (bpp >> 3);
466 top_field->gam_gdp_pml += src_y * fb->pitches[0];
467
468 /* input parameters */
469 top_field->gam_gdp_pmp = fb->pitches[0];
470 top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
471 clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
472
473 /* output parameters */
474 ydo = sti_vtg_get_line_number(*mode, dst_y);
475 yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
476 xdo = sti_vtg_get_pixel_number(*mode, dst_x);
477 xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
478 top_field->gam_gdp_vpo = (ydo << 16) | xdo;
479 top_field->gam_gdp_vps = (yds << 16) | xds;
480
481 /* Same content and chained together */
482 memcpy(btm_field, top_field, sizeof(*btm_field));
483 top_field->gam_gdp_nvn = list->btm_field_paddr;
484 btm_field->gam_gdp_nvn = list->top_field_paddr;
485
486 /* Interlaced mode */
487 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
488 btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
489 fb->pitches[0];
490
491 if (first_prepare) {
492 /* Register gdp callback */
493 if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
494 compo->vtg_main : compo->vtg_aux,
495 &gdp->vtg_field_nb, mixer->id)) {
496 DRM_ERROR("Cannot register VTG notifier\n");
497 return;
498 }
499
500 /* Set and enable gdp clock */
501 if (gdp->clk_pix) {
502 struct clk *clkp;
503 int rate = mode->clock * 1000;
504
505 /* According to the mixer used, the gdp pixel clock
506 * should have a different parent clock. */
507 if (mixer->id == STI_MIXER_MAIN)
508 clkp = gdp->clk_main_parent;
509 else
510 clkp = gdp->clk_aux_parent;
511
512 if (clkp)
513 clk_set_parent(gdp->clk_pix, clkp);
514
515 res = clk_set_rate(gdp->clk_pix, rate);
516 if (res < 0) {
517 DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
518 rate);
519 return;
520 }
521
522 if (clk_prepare_enable(gdp->clk_pix)) {
523 DRM_ERROR("Failed to prepare/enable gdp\n");
524 return;
525 }
526 }
527 }
528
529 /* Update the NVN field of the 'right' field of the current GDP node
530 * (being used by the HW) with the address of the updated ('free') top
531 * field GDP node.
532 * - In interlaced mode the 'right' field is the bottom field as we
533 * update frames starting from their top field
534 * - In progressive mode, we update both bottom and top fields which
535 * are equal nodes.
536 * At the next VSYNC, the updated node list will be used by the HW.
537 */
538 curr_list = sti_gdp_get_current_nodes(gdp);
539 dma_updated_top = list->top_field_paddr;
540 dma_updated_btm = list->btm_field_paddr;
541
542 dev_dbg(gdp->dev, "Current NVN:0x%X\n",
543 readl(gdp->regs + GAM_GDP_NVN_OFFSET));
544 dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
545 (unsigned long)cma_obj->paddr,
546 readl(gdp->regs + GAM_GDP_PML_OFFSET));
547
548 if (!curr_list) {
549 /* First update or invalid node: write directly in the
550 * hw register */
551 DRM_DEBUG_DRIVER("%s first update (or invalid node)",
552 sti_plane_to_str(plane));
553
554 writel(gdp->is_curr_top ?
555 dma_updated_btm : dma_updated_top,
556 gdp->regs + GAM_GDP_NVN_OFFSET);
557 goto end;
558 }
559
560 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
561 if (gdp->is_curr_top) {
562 /* Do not update in the middle of the frame, but
563 * postpone the update until after the bottom field has
564 * been displayed */
565 curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
566 } else {
567 /* Direct update to avoid one frame delay */
568 writel(dma_updated_top,
569 gdp->regs + GAM_GDP_NVN_OFFSET);
570 }
571 } else {
572 /* Direct update for progressive to avoid one frame delay */
573 writel(dma_updated_top, gdp->regs + GAM_GDP_NVN_OFFSET);
574 }
575
576end:
577 plane->status = STI_PLANE_UPDATED;
578}
579
580static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
581 struct drm_plane_state *oldstate)
582{
583 struct sti_plane *plane = to_sti_plane(drm_plane);
584 struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
585
586 if (!drm_plane->crtc) {
587 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
588 drm_plane->base.id);
589 return;
590 }
591
592 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
593 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
594 drm_plane->base.id, sti_plane_to_str(plane));
595
596 plane->status = STI_PLANE_DISABLING;
597}
598
599static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
600 .atomic_update = sti_gdp_atomic_update,
601 .atomic_disable = sti_gdp_atomic_disable,
568}; 602};
569 603
570struct sti_layer *sti_gdp_create(struct device *dev, int id) 604struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
605 struct device *dev, int desc,
606 void __iomem *baseaddr,
607 unsigned int possible_crtcs,
608 enum drm_plane_type type)
571{ 609{
572 struct sti_gdp *gdp; 610 struct sti_gdp *gdp;
611 int res;
573 612
574 gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL); 613 gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
575 if (!gdp) { 614 if (!gdp) {
@@ -577,8 +616,33 @@ struct sti_layer *sti_gdp_create(struct device *dev, int id)
577 return NULL; 616 return NULL;
578 } 617 }
579 618
580 gdp->layer.ops = &gdp_ops; 619 gdp->dev = dev;
620 gdp->regs = baseaddr;
621 gdp->plane.desc = desc;
622 gdp->plane.status = STI_PLANE_DISABLED;
623
581 gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb; 624 gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
582 625
583 return (struct sti_layer *)gdp; 626 sti_gdp_init(gdp);
627
628 res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
629 possible_crtcs,
630 &sti_plane_helpers_funcs,
631 gdp_supported_formats,
632 ARRAY_SIZE(gdp_supported_formats),
633 type);
634 if (res) {
635 DRM_ERROR("Failed to initialize universal plane\n");
636 goto err;
637 }
638
639 drm_plane_helper_add(&gdp->plane.drm_plane, &sti_gdp_helpers_funcs);
640
641 sti_plane_init_property(&gdp->plane, type);
642
643 return &gdp->plane.drm_plane;
644
645err:
646 devm_kfree(dev, gdp);
647 return NULL;
584} 648}
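A condensed sketch (illustration only; the helper name is an assumption) of the NVN hand-over rule that sti_gdp_atomic_update() applies once the free node bank has been rebuilt, summarizing the interlaced versus progressive cases handled above:

static void example_gdp_push_nodes(struct sti_gdp *gdp,
				   struct sti_gdp_node_list *curr,
				   u32 new_top, u32 new_btm,
				   bool interlaced)
{
	if (!curr) {
		/* first update or invalid node: program the HW pointer now */
		writel(gdp->is_curr_top ? new_btm : new_top,
		       gdp->regs + GAM_GDP_NVN_OFFSET);
		return;
	}

	if (interlaced && gdp->is_curr_top)
		/* postpone the switch until the bottom field is displayed */
		curr->btm_field->gam_gdp_nvn = new_top;
	else
		/* progressive, or bottom field in flight: switch directly */
		writel(new_top, gdp->regs + GAM_GDP_NVN_OFFSET);
}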
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
index 1dab68274ad3..73947a4a8004 100644
--- a/drivers/gpu/drm/sti/sti_gdp.h
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -11,6 +11,9 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14struct sti_layer *sti_gdp_create(struct device *dev, int id); 14struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
15 15 struct device *dev, int desc,
16 void __iomem *baseaddr,
17 unsigned int possible_crtcs,
18 enum drm_plane_type type);
16#endif 19#endif
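For reference, a hypothetical caller matching the new prototype (the real caller lives in sti_compositor.c, not shown in this patch; the wrapper name, descriptor, base address and CRTC mask are placeholders):

static struct drm_plane *example_create_primary_gdp(struct drm_device *drm_dev,
						     struct device *dev,
						     void __iomem *base)
{
	/* desc, register base and crtc mask chosen for illustration */
	return sti_gdp_create(drm_dev, dev, STI_GDP_0, base,
			      1 << STI_MIXER_MAIN,
			      DRM_PLANE_TYPE_PRIMARY);
}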
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index f28a4d54487c..09e29e43423e 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -588,7 +588,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
588 return count; 588 return count;
589 589
590fail: 590fail:
591 DRM_ERROR("Can not read HDMI EDID\n"); 591 DRM_ERROR("Can't read HDMI EDID\n");
592 return 0; 592 return 0;
593} 593}
594 594
@@ -693,21 +693,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
693 struct sti_hdmi_connector *connector; 693 struct sti_hdmi_connector *connector;
694 struct drm_connector *drm_connector; 694 struct drm_connector *drm_connector;
695 struct drm_bridge *bridge; 695 struct drm_bridge *bridge;
696 struct device_node *ddc;
697 int err; 696 int err;
698 697
699 ddc = of_parse_phandle(dev->of_node, "ddc", 0);
700 if (ddc) {
701 hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
702 if (!hdmi->ddc_adapt) {
703 err = -EPROBE_DEFER;
704 of_node_put(ddc);
705 return err;
706 }
707
708 of_node_put(ddc);
709 }
710
711 /* Set the drm device handle */ 698 /* Set the drm device handle */
712 hdmi->drm_dev = drm_dev; 699 hdmi->drm_dev = drm_dev;
713 700
@@ -796,6 +783,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
796 struct sti_hdmi *hdmi; 783 struct sti_hdmi *hdmi;
797 struct device_node *np = dev->of_node; 784 struct device_node *np = dev->of_node;
798 struct resource *res; 785 struct resource *res;
786 struct device_node *ddc;
799 int ret; 787 int ret;
800 788
801 DRM_INFO("%s\n", __func__); 789 DRM_INFO("%s\n", __func__);
@@ -804,6 +792,17 @@ static int sti_hdmi_probe(struct platform_device *pdev)
804 if (!hdmi) 792 if (!hdmi)
805 return -ENOMEM; 793 return -ENOMEM;
806 794
795 ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0);
796 if (ddc) {
797 hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
798 if (!hdmi->ddc_adapt) {
799 of_node_put(ddc);
800 return -EPROBE_DEFER;
801 }
802
803 of_node_put(ddc);
804 }
805
807 hdmi->dev = pdev->dev; 806 hdmi->dev = pdev->dev;
808 807
809 /* Get resources */ 808 /* Get resources */
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index b0eb62de1b2e..7c8f9b8bfae1 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -12,11 +12,12 @@
12#include <linux/reset.h> 12#include <linux/reset.h>
13 13
14#include <drm/drmP.h> 14#include <drm/drmP.h>
15#include <drm/drm_fb_cma_helper.h>
16#include <drm/drm_gem_cma_helper.h>
15 17
16#include "sti_drm_plane.h" 18#include "sti_compositor.h"
17#include "sti_hqvdp.h"
18#include "sti_hqvdp_lut.h" 19#include "sti_hqvdp_lut.h"
19#include "sti_layer.h" 20#include "sti_plane.h"
20#include "sti_vtg.h" 21#include "sti_vtg.h"
21 22
22/* Firmware name */ 23/* Firmware name */
@@ -322,8 +323,7 @@ struct sti_hqvdp_cmd {
322 * @dev: driver device 323 * @dev: driver device
323 * @drm_dev: the drm device 324 * @drm_dev: the drm device
324 * @regs: registers 325 * @regs: registers
325 * @layer: layer structure for hqvdp itself 326 * @plane: plane structure for hqvdp itself
326 * @vid_plane: VID plug used as link with compositor IP
327 * @clk: IP clock 327 * @clk: IP clock
328 * @clk_pix_main: pix main clock 328 * @clk_pix_main: pix main clock
329 * @reset: reset control 329 * @reset: reset control
@@ -334,13 +334,13 @@ struct sti_hqvdp_cmd {
334 * @hqvdp_cmd: buffer of commands 334 * @hqvdp_cmd: buffer of commands
335 * @hqvdp_cmd_paddr: physical address of hqvdp_cmd 335 * @hqvdp_cmd_paddr: physical address of hqvdp_cmd
336 * @vtg: vtg for main data path 336 * @vtg: vtg for main data path
337 * @xp70_initialized: true if xp70 is already initialized
337 */ 338 */
338struct sti_hqvdp { 339struct sti_hqvdp {
339 struct device *dev; 340 struct device *dev;
340 struct drm_device *drm_dev; 341 struct drm_device *drm_dev;
341 void __iomem *regs; 342 void __iomem *regs;
342 struct sti_layer layer; 343 struct sti_plane plane;
343 struct drm_plane *vid_plane;
344 struct clk *clk; 344 struct clk *clk;
345 struct clk *clk_pix_main; 345 struct clk *clk_pix_main;
346 struct reset_control *reset; 346 struct reset_control *reset;
@@ -351,24 +351,15 @@ struct sti_hqvdp {
351 void *hqvdp_cmd; 351 void *hqvdp_cmd;
352 dma_addr_t hqvdp_cmd_paddr; 352 dma_addr_t hqvdp_cmd_paddr;
353 struct sti_vtg *vtg; 353 struct sti_vtg *vtg;
354 bool xp70_initialized;
354}; 355};
355 356
356#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer) 357#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
357 358
358static const uint32_t hqvdp_supported_formats[] = { 359static const uint32_t hqvdp_supported_formats[] = {
359 DRM_FORMAT_NV12, 360 DRM_FORMAT_NV12,
360}; 361};
361 362
362static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
363{
364 return hqvdp_supported_formats;
365}
366
367static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
368{
369 return ARRAY_SIZE(hqvdp_supported_formats);
370}
371
372/** 363/**
373 * sti_hqvdp_get_free_cmd 364 * sti_hqvdp_get_free_cmd
374 * @hqvdp: hqvdp structure 365 * @hqvdp: hqvdp structure
@@ -484,7 +475,12 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
484 475
485/** 476/**
486 * sti_hqvdp_check_hw_scaling 477 * sti_hqvdp_check_hw_scaling
487 * @layer: hqvdp layer 478 * @hqvdp: hqvdp pointer
479 * @mode: display mode with timing constraints
480 * @src_w: source width
481 * @src_h: source height
482 * @dst_w: destination width
483 * @dst_h: destination height
488 * 484 *
489 * Check if the HW is able to perform the scaling request 485 * Check if the HW is able to perform the scaling request
490 * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where: 486 * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
@@ -498,184 +494,36 @@ static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
498 * RETURNS: 494 * RETURNS:
499 * True if the HW can scale. 495 * True if the HW can scale.
500 */ 496 */
501static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer) 497static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
498 struct drm_display_mode *mode,
499 int src_w, int src_h,
500 int dst_w, int dst_h)
502{ 501{
503 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
504 unsigned long lfw; 502 unsigned long lfw;
505 unsigned int inv_zy; 503 unsigned int inv_zy;
506 504
507 lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000); 505 lfw = mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
508 lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000; 506 lfw /= max(src_w, dst_w) * mode->clock / 1000;
509 507
510 inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h); 508 inv_zy = DIV_ROUND_UP(src_h, dst_h);
511 509
512 return (inv_zy <= lfw) ? true : false; 510 return (inv_zy <= lfw) ? true : false;
513} 511}
514 512
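A worked example of the "CEIL(1/Zy) <= FLOOR(LFW)" limit above, with illustrative numbers that are not taken from the patch (IP clock, mode timings and sizes are assumptions):

/*
 * IP clock 400 MHz, htotal = 2200, pixel clock = 148500 kHz,
 * src = 1920x1080, dst = 960x540:
 *
 *   LFW  = 2200 * (400000000 / 1000000) / (1920 * 148500 / 1000)
 *        = 880000 / 285120              -> FLOOR(LFW) = 3
 *   1/Zy = 1080 / 540                   -> CEIL(1/Zy) = 2
 *
 * 2 <= 3, so this downscale is within the firmware capability.
 */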
515/** 513/**
516 * sti_hqvdp_prepare_layer 514 * sti_hqvdp_disable
517 * @layer: hqvdp layer 515 * @hqvdp: hqvdp pointer
518 * @first_prepare: true if it is the first time this function is called
519 * 516 *
520 * Prepares a command for the firmware 517 * Disables the HQVDP plane
521 *
522 * RETURNS:
523 * 0 on success.
524 */ 518 */
525static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare) 519static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
526{
527 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
528 struct sti_hqvdp_cmd *cmd;
529 int scale_h, scale_v;
530 int cmd_offset;
531
532 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
533
534 /* prepare and commit VID plane */
535 hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
536 layer->crtc, layer->fb,
537 layer->dst_x, layer->dst_y,
538 layer->dst_w, layer->dst_h,
539 layer->src_x, layer->src_y,
540 layer->src_w, layer->src_h);
541
542 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
543 if (cmd_offset == -1) {
544 DRM_ERROR("No available hqvdp_cmd now\n");
545 return -EBUSY;
546 }
547 cmd = hqvdp->hqvdp_cmd + cmd_offset;
548
549 if (!sti_hqvdp_check_hw_scaling(layer)) {
550 DRM_ERROR("Scaling beyond HW capabilities\n");
551 return -EINVAL;
552 }
553
554 /* Static parameters, defaulting to progressive mode */
555 cmd->top.config = TOP_CONFIG_PROGRESSIVE;
556 cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
557 cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
558 cmd->csdi.config = CSDI_CONFIG_PROG;
559
560 /* VC1RE, FMD bypassed : keep everything set to 0
561 * IQI/P2I bypassed */
562 cmd->iqi.config = IQI_CONFIG_DFLT;
563 cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
564 cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
565 cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
566
567 /* Buffer planes address */
568 cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
569 cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
570
571 /* Pitches */
572 cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
573 layer->pitches[0];
574 cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
575 layer->pitches[1];
576
577 /* Input / output size
578 * Align to upper even value */
579 layer->dst_w = ALIGN(layer->dst_w, 2);
580 layer->dst_h = ALIGN(layer->dst_h, 2);
581
582 if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
583 (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
584 (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
585 (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
586 DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
587 layer->src_w, layer->src_h,
588 layer->dst_w, layer->dst_h);
589 return -EINVAL;
590 }
591 cmd->top.input_viewport_size = cmd->top.input_frame_size =
592 layer->src_h << 16 | layer->src_w;
593 cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
594 cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
595
596 /* Handle interlaced */
597 if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
598 /* Top field to display */
599 cmd->top.config = TOP_CONFIG_INTER_TOP;
600
601 /* Update pitches and vert size */
602 cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
603 layer->src_w;
604 cmd->top.luma_processed_pitch *= 2;
605 cmd->top.luma_src_pitch *= 2;
606 cmd->top.chroma_processed_pitch *= 2;
607 cmd->top.chroma_src_pitch *= 2;
608
609 /* Enable directional deinterlacing processing */
610 cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
611 cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
612 cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
613 }
614
615 /* Update hvsrc lut coef */
616 scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
617 sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
618
619 scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
620 sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
621
622 if (first_prepare) {
623 /* Prevent VTG shutdown */
624 if (clk_prepare_enable(hqvdp->clk_pix_main)) {
625 DRM_ERROR("Failed to prepare/enable pix main clk\n");
626 return -ENXIO;
627 }
628
629 /* Register VTG Vsync callback to handle bottom fields */
630 if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
631 sti_vtg_register_client(hqvdp->vtg,
632 &hqvdp->vtg_nb, layer->mixer_id)) {
633 DRM_ERROR("Cannot register VTG notifier\n");
634 return -ENXIO;
635 }
636 }
637
638 return 0;
639}
640
641static int sti_hqvdp_commit_layer(struct sti_layer *layer)
642{ 520{
643 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
644 int cmd_offset;
645
646 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
647
648 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
649 if (cmd_offset == -1) {
650 DRM_ERROR("No available hqvdp_cmd now\n");
651 return -EBUSY;
652 }
653
654 writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
655 hqvdp->regs + HQVDP_MBX_NEXT_CMD);
656
657 hqvdp->curr_field_count++;
658
659 /* Interlaced : get ready to display the bottom field at next Vsync */
660 if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
661 hqvdp->btm_field_pending = true;
662
663 dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
664 __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
665
666 return 0;
667}
668
669static int sti_hqvdp_disable_layer(struct sti_layer *layer)
670{
671 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
672 int i; 521 int i;
673 522
674 DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer)); 523 DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&hqvdp->plane));
675 524
676 /* Unregister VTG Vsync callback */ 525 /* Unregister VTG Vsync callback */
677 if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) && 526 if (sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
678 sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
679 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 527 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
680 528
681 /* Set next cmd to NULL */ 529 /* Set next cmd to NULL */
@@ -691,15 +539,10 @@ static int sti_hqvdp_disable_layer(struct sti_layer *layer)
691 /* VTG can stop now */ 539 /* VTG can stop now */
692 clk_disable_unprepare(hqvdp->clk_pix_main); 540 clk_disable_unprepare(hqvdp->clk_pix_main);
693 541
694 if (i == POLL_MAX_ATTEMPT) { 542 if (i == POLL_MAX_ATTEMPT)
695 DRM_ERROR("XP70 could not revert to idle\n"); 543 DRM_ERROR("XP70 could not revert to idle\n");
696 return -ENXIO;
697 }
698
699 /* disable VID plane */
700 hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
701 544
702 return 0; 545 hqvdp->plane.status = STI_PLANE_DISABLED;
703} 546}
704 547
705/** 548/**
@@ -724,6 +567,14 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
724 return 0; 567 return 0;
725 } 568 }
726 569
570 if (hqvdp->plane.status == STI_PLANE_FLUSHING) {
571 /* disable needs to be synchronized on the vsync event */
572 DRM_DEBUG_DRIVER("Vsync event received => disable %s\n",
573 sti_plane_to_str(&hqvdp->plane));
574
575 sti_hqvdp_disable(hqvdp);
576 }
577
727 if (hqvdp->btm_field_pending) { 578 if (hqvdp->btm_field_pending) {
728 /* Create the btm field command from the current one */ 579 /* Create the btm field command from the current one */
729 btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp); 580 btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
@@ -758,32 +609,10 @@ int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
758 return 0; 609 return 0;
759} 610}
760 611
761static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id) 612static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
762{ 613{
763 struct drm_plane *plane;
764
765 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
766 struct sti_layer *layer = to_sti_layer(plane);
767
768 if (layer->desc == id)
769 return plane;
770 }
771
772 return NULL;
773}
774
775static void sti_hqvd_init(struct sti_layer *layer)
776{
777 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
778 int size; 614 int size;
779 615
780 /* find the plane macthing with vid 0 */
781 hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
782 if (!hqvdp->vid_plane) {
783 DRM_ERROR("Cannot find Main video layer\n");
784 return;
785 }
786
787 hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb; 616 hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
788 617
789 /* Allocate memory for the VDP commands */ 618 /* Allocate memory for the VDP commands */
@@ -799,24 +628,213 @@ static void sti_hqvd_init(struct sti_layer *layer)
799 memset(hqvdp->hqvdp_cmd, 0, size); 628 memset(hqvdp->hqvdp_cmd, 0, size);
800} 629}
801 630
802static const struct sti_layer_funcs hqvdp_ops = { 631static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
803 .get_formats = sti_hqvdp_get_formats, 632 struct drm_plane_state *oldstate)
804 .get_nb_formats = sti_hqvdp_get_nb_formats, 633{
805 .init = sti_hqvd_init, 634 struct drm_plane_state *state = drm_plane->state;
806 .prepare = sti_hqvdp_prepare_layer, 635 struct sti_plane *plane = to_sti_plane(drm_plane);
807 .commit = sti_hqvdp_commit_layer, 636 struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
808 .disable = sti_hqvdp_disable_layer, 637 struct drm_crtc *crtc = state->crtc;
638 struct sti_mixer *mixer = to_sti_mixer(crtc);
639 struct drm_framebuffer *fb = state->fb;
640 struct drm_display_mode *mode = &crtc->mode;
641 int dst_x = state->crtc_x;
642 int dst_y = state->crtc_y;
643 int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
644 int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
645 /* src coordinates are expressed in 16.16 fixed point */
646 int src_x = state->src_x >> 16;
647 int src_y = state->src_y >> 16;
648 int src_w = state->src_w >> 16;
649 int src_h = state->src_h >> 16;
650 bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
651 struct drm_gem_cma_object *cma_obj;
652 struct sti_hqvdp_cmd *cmd;
653 int scale_h, scale_v;
654 int cmd_offset;
655
656 DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
657 crtc->base.id, sti_mixer_to_str(mixer),
658 drm_plane->base.id, sti_plane_to_str(plane));
659 DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
660 sti_plane_to_str(plane),
661 dst_w, dst_h, dst_x, dst_y,
662 src_w, src_h, src_x, src_y);
663
664 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
665 if (cmd_offset == -1) {
666 DRM_ERROR("No available hqvdp_cmd now\n");
667 return;
668 }
669 cmd = hqvdp->hqvdp_cmd + cmd_offset;
670
671 if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
672 src_w, src_h,
673 dst_w, dst_h)) {
674 DRM_ERROR("Scaling beyond HW capabilities\n");
675 return;
676 }
677
678 /* Static parameters, defaulting to progressive mode */
679 cmd->top.config = TOP_CONFIG_PROGRESSIVE;
680 cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
681 cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
682 cmd->csdi.config = CSDI_CONFIG_PROG;
683
684 /* VC1RE, FMD bypassed: keep everything set to 0
685 * IQI/P2I bypassed */
686 cmd->iqi.config = IQI_CONFIG_DFLT;
687 cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
688 cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
689 cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
690
691 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
692 if (!cma_obj) {
693 DRM_ERROR("Can't get CMA GEM object for fb\n");
694 return;
695 }
696
697 DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
698 (char *)&fb->pixel_format,
699 (unsigned long)cma_obj->paddr);
700
701 /* Buffer planes address */
702 cmd->top.current_luma = (u32)cma_obj->paddr + fb->offsets[0];
703 cmd->top.current_chroma = (u32)cma_obj->paddr + fb->offsets[1];
704
705 /* Pitches */
706 cmd->top.luma_processed_pitch = fb->pitches[0];
707 cmd->top.luma_src_pitch = fb->pitches[0];
708 cmd->top.chroma_processed_pitch = fb->pitches[1];
709 cmd->top.chroma_src_pitch = fb->pitches[1];
710
711 /* Input / output size
712 * Align to upper even value */
713 dst_w = ALIGN(dst_w, 2);
714 dst_h = ALIGN(dst_h, 2);
715
716 if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
717 (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
718 (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
719 (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
720 DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
721 src_w, src_h,
722 dst_w, dst_h);
723 return;
724 }
725
726 cmd->top.input_viewport_size = src_h << 16 | src_w;
727 cmd->top.input_frame_size = src_h << 16 | src_w;
728 cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
729 cmd->top.input_viewport_ori = src_y << 16 | src_x;
730
731 /* Handle interlaced */
732 if (fb->flags & DRM_MODE_FB_INTERLACED) {
733 /* Top field to display */
734 cmd->top.config = TOP_CONFIG_INTER_TOP;
735
736 /* Update pitches and vert size */
737 cmd->top.input_frame_size = (src_h / 2) << 16 | src_w;
738 cmd->top.luma_processed_pitch *= 2;
739 cmd->top.luma_src_pitch *= 2;
740 cmd->top.chroma_processed_pitch *= 2;
741 cmd->top.chroma_src_pitch *= 2;
742
743 /* Enable directional deinterlacing processing */
744 cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
745 cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
746 cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
747 }
748
749 /* Update hvsrc lut coef */
750 scale_h = SCALE_FACTOR * dst_w / src_w;
751 sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
752
753 scale_v = SCALE_FACTOR * dst_h / src_h;
754 sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
755
756 if (first_prepare) {
757 /* Prevent VTG shutdown */
758 if (clk_prepare_enable(hqvdp->clk_pix_main)) {
759 DRM_ERROR("Failed to prepare/enable pix main clk\n");
760 return;
761 }
762
763 /* Register VTG Vsync callback to handle bottom fields */
764 if (sti_vtg_register_client(hqvdp->vtg,
765 &hqvdp->vtg_nb,
766 mixer->id)) {
767 DRM_ERROR("Cannot register VTG notifier\n");
768 return;
769 }
770 }
771
772 writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
773 hqvdp->regs + HQVDP_MBX_NEXT_CMD);
774
775 hqvdp->curr_field_count++;
776
777 /* Interlaced : get ready to display the bottom field at next Vsync */
778 if (fb->flags & DRM_MODE_FB_INTERLACED)
779 hqvdp->btm_field_pending = true;
780
781 dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
782 __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
783
784 plane->status = STI_PLANE_UPDATED;
785}
786
787static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
788 struct drm_plane_state *oldstate)
789{
790 struct sti_plane *plane = to_sti_plane(drm_plane);
791 struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
792
793 if (!drm_plane->crtc) {
794 DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
795 drm_plane->base.id);
796 return;
797 }
798
799 DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
800 drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
801 drm_plane->base.id, sti_plane_to_str(plane));
802
803 plane->status = STI_PLANE_DISABLING;
804}
805
806static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
807 .atomic_update = sti_hqvdp_atomic_update,
808 .atomic_disable = sti_hqvdp_atomic_disable,
809}; 809};
810 810
811struct sti_layer *sti_hqvdp_create(struct device *dev) 811static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
812 struct device *dev, int desc)
812{ 813{
813 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); 814 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
815 int res;
816
817 hqvdp->plane.desc = desc;
818 hqvdp->plane.status = STI_PLANE_DISABLED;
814 819
815 hqvdp->layer.ops = &hqvdp_ops; 820 sti_hqvdp_init(hqvdp);
816 821
817 return &hqvdp->layer; 822 res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
823 &sti_plane_helpers_funcs,
824 hqvdp_supported_formats,
825 ARRAY_SIZE(hqvdp_supported_formats),
826 DRM_PLANE_TYPE_OVERLAY);
827 if (res) {
828 DRM_ERROR("Failed to initialize universal plane\n");
829 return NULL;
830 }
831
832 drm_plane_helper_add(&hqvdp->plane.drm_plane, &sti_hqvdp_helpers_funcs);
833
834 sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
835
836 return &hqvdp->plane.drm_plane;
818} 837}
819EXPORT_SYMBOL(sti_hqvdp_create);
820 838
821static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp) 839static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
822{ 840{
@@ -859,6 +877,12 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
859 } *header; 877 } *header;
860 878
861 DRM_DEBUG_DRIVER("\n"); 879 DRM_DEBUG_DRIVER("\n");
880
881 if (hqvdp->xp70_initialized) {
882 DRM_INFO("HQVDP XP70 already initialized\n");
883 return;
884 }
885
862 /* Check firmware parts */ 886 /* Check firmware parts */
863 if (!firmware) { 887 if (!firmware) {
864 DRM_ERROR("Firmware not available\n"); 888 DRM_ERROR("Firmware not available\n");
@@ -946,7 +970,10 @@ static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
946 /* Launch Vsync */ 970 /* Launch Vsync */
947 writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC); 971 writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
948 972
949 DRM_INFO("HQVDP XP70 started\n"); 973 DRM_INFO("HQVDP XP70 initialized\n");
974
975 hqvdp->xp70_initialized = true;
976
950out: 977out:
951 release_firmware(firmware); 978 release_firmware(firmware);
952} 979}
@@ -955,7 +982,7 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
955{ 982{
956 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev); 983 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
957 struct drm_device *drm_dev = data; 984 struct drm_device *drm_dev = data;
958 struct sti_layer *layer; 985 struct drm_plane *plane;
959 int err; 986 int err;
960 987
961 DRM_DEBUG_DRIVER("\n"); 988 DRM_DEBUG_DRIVER("\n");
@@ -971,13 +998,10 @@ int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
971 return err; 998 return err;
972 } 999 }
973 1000
974 layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs); 1001 /* Create HQVDP plane once xp70 is initialized */
975 if (!layer) { 1002 plane = sti_hqvdp_create(drm_dev, hqvdp->dev, STI_HQVDP_0);
1003 if (!plane)
976 DRM_ERROR("Can't create HQVDP plane\n"); 1004 DRM_ERROR("Can't create HQVDP plane\n");
977 return -ENOMEM;
978 }
979
980 sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
981 1005
982 return 0; 1006 return 0;
983} 1007}
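Note on the lifecycle introduced above: the layer ops (prepare/commit/disable) are gone and the HQVDP plane is now driven through the two atomic helper callbacks. Disabling is no longer immediate: sti_hqvdp_atomic_disable() only marks the plane STI_PLANE_DISABLING, and the hardware is actually stopped from the VTG vsync callback once the plane reads STI_PLANE_FLUSHING. A minimal stand-alone sketch of that handshake follows; the DISABLING -> FLUSHING step is assumed to happen in the CRTC commit code (sti_crtc.c, not part of this hunk), and the names below are illustrative, not the driver's.

/*
 * Sketch of the plane status handshake.  Assumption: the CRTC commit
 * path moves DISABLING planes to FLUSHING before the next vsync.
 */
#include <stdio.h>

enum status { DISABLED, UPDATED, DISABLING, FLUSHING };

static enum status atomic_update(void)  { return UPDATED; }   /* frame posted */
static enum status atomic_disable(void) { return DISABLING; } /* intent only */

static enum status crtc_flush(enum status s)  /* assumed CRTC step */
{
	return (s == DISABLING) ? FLUSHING : s;
}

static enum status vsync_cb(enum status s)    /* mirrors sti_hqvdp_vtg_cb() */
{
	if (s == FLUSHING) {
		printf("vsync: disable the hardware now\n");
		return DISABLED;
	}
	return s;
}

int main(void)
{
	enum status s = DISABLED;

	s = atomic_update();   /* plane shown */
	s = atomic_disable();  /* userspace asked for disable */
	s = crtc_flush(s);     /* commit: DISABLING -> FLUSHING */
	s = vsync_cb(s);       /* vsync: hardware off, back to DISABLED */
	return (s == DISABLED) ? 0 : 1;
}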
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
deleted file mode 100644
index cd5ecd0a6dea..000000000000
--- a/drivers/gpu/drm/sti/sti_hqvdp.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_HQVDP_H_
8#define _STI_HQVDP_H_
9
10struct sti_layer *sti_hqvdp_create(struct device *dev);
11
12#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
deleted file mode 100644
index 899104f9d4bc..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ /dev/null
@@ -1,213 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <drm/drmP.h>
10#include <drm/drm_gem_cma_helper.h>
11#include <drm/drm_fb_cma_helper.h>
12
13#include "sti_compositor.h"
14#include "sti_cursor.h"
15#include "sti_gdp.h"
16#include "sti_hqvdp.h"
17#include "sti_layer.h"
18#include "sti_vid.h"
19
20const char *sti_layer_to_str(struct sti_layer *layer)
21{
22 switch (layer->desc) {
23 case STI_GDP_0:
24 return "GDP0";
25 case STI_GDP_1:
26 return "GDP1";
27 case STI_GDP_2:
28 return "GDP2";
29 case STI_GDP_3:
30 return "GDP3";
31 case STI_VID_0:
32 return "VID0";
33 case STI_VID_1:
34 return "VID1";
35 case STI_CURSOR:
36 return "CURSOR";
37 case STI_HQVDP_0:
38 return "HQVDP0";
39 default:
40 return "<UNKNOWN LAYER>";
41 }
42}
43EXPORT_SYMBOL(sti_layer_to_str);
44
45struct sti_layer *sti_layer_create(struct device *dev, int desc,
46 void __iomem *baseaddr)
47{
48
49 struct sti_layer *layer = NULL;
50
51 switch (desc & STI_LAYER_TYPE_MASK) {
52 case STI_GDP:
53 layer = sti_gdp_create(dev, desc);
54 break;
55 case STI_VID:
56 layer = sti_vid_create(dev);
57 break;
58 case STI_CUR:
59 layer = sti_cursor_create(dev);
60 break;
61 case STI_VDP:
62 layer = sti_hqvdp_create(dev);
63 break;
64 }
65
66 if (!layer) {
67 DRM_ERROR("Failed to create layer\n");
68 return NULL;
69 }
70
71 layer->desc = desc;
72 layer->dev = dev;
73 layer->regs = baseaddr;
74
75 layer->ops->init(layer);
76
77 DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
78
79 return layer;
80}
81EXPORT_SYMBOL(sti_layer_create);
82
83int sti_layer_prepare(struct sti_layer *layer,
84 struct drm_crtc *crtc,
85 struct drm_framebuffer *fb,
86 struct drm_display_mode *mode, int mixer_id,
87 int dest_x, int dest_y, int dest_w, int dest_h,
88 int src_x, int src_y, int src_w, int src_h)
89{
90 int ret;
91 unsigned int i;
92 struct drm_gem_cma_object *cma_obj;
93
94 if (!layer || !fb || !mode) {
95 DRM_ERROR("Null fb, layer or mode\n");
96 return 1;
97 }
98
99 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
100 if (!cma_obj) {
101 DRM_ERROR("Can't get CMA GEM object for fb\n");
102 return 1;
103 }
104
105 layer->crtc = crtc;
106 layer->fb = fb;
107 layer->mode = mode;
108 layer->mixer_id = mixer_id;
109 layer->dst_x = dest_x;
110 layer->dst_y = dest_y;
111 layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
112 layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
113 layer->src_x = src_x;
114 layer->src_y = src_y;
115 layer->src_w = src_w;
116 layer->src_h = src_h;
117 layer->format = fb->pixel_format;
118 layer->vaddr = cma_obj->vaddr;
119 layer->paddr = cma_obj->paddr;
120 for (i = 0; i < 4; i++) {
121 layer->pitches[i] = fb->pitches[i];
122 layer->offsets[i] = fb->offsets[i];
123 }
124
125 DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
126 sti_layer_to_str(layer),
127 layer->mixer_id);
128 DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
129 sti_layer_to_str(layer),
130 layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
131 layer->src_w, layer->src_h, layer->src_x,
132 layer->src_y);
133
134 DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
135 (char *)&layer->format, (unsigned long)layer->paddr);
136
137 if (!layer->ops->prepare)
138 goto err_no_prepare;
139
140 ret = layer->ops->prepare(layer, !layer->enabled);
141 if (!ret)
142 layer->enabled = true;
143
144 return ret;
145
146err_no_prepare:
147 DRM_ERROR("Cannot prepare\n");
148 return 1;
149}
150
151int sti_layer_commit(struct sti_layer *layer)
152{
153 if (!layer)
154 return 1;
155
156 if (!layer->ops->commit)
157 goto err_no_commit;
158
159 return layer->ops->commit(layer);
160
161err_no_commit:
162 DRM_ERROR("Cannot commit\n");
163 return 1;
164}
165
166int sti_layer_disable(struct sti_layer *layer)
167{
168 int ret;
169
170 DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
171 if (!layer)
172 return 1;
173
174 if (!layer->enabled)
175 return 0;
176
177 if (!layer->ops->disable)
178 goto err_no_disable;
179
180 ret = layer->ops->disable(layer);
181 if (!ret)
182 layer->enabled = false;
183 else
184 DRM_ERROR("Disable failed\n");
185
186 return ret;
187
188err_no_disable:
189 DRM_ERROR("Cannot disable\n");
190 return 1;
191}
192
193const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
194{
195 if (!layer)
196 return NULL;
197
198 if (!layer->ops->get_formats)
199 return NULL;
200
201 return layer->ops->get_formats(layer);
202}
203
204unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
205{
206 if (!layer)
207 return 0;
208
209 if (!layer->ops->get_nb_formats)
210 return 0;
211
212 return layer->ops->get_nb_formats(layer);
213}
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
deleted file mode 100644
index ceff497f557e..000000000000
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#ifndef _STI_LAYER_H_
10#define _STI_LAYER_H_
11
12#include <drm/drmP.h>
13
14#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
15
16#define STI_LAYER_TYPE_SHIFT 8
17#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
18
19struct sti_layer;
20
21enum sti_layer_type {
22 STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
23 STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
24 STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
25 STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
26 STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
27};
28
29enum sti_layer_id_of_type {
30 STI_ID_0 = 0,
31 STI_ID_1 = 1,
32 STI_ID_2 = 2,
33 STI_ID_3 = 3
34};
35
36enum sti_layer_desc {
37 STI_GDP_0 = STI_GDP | STI_ID_0,
38 STI_GDP_1 = STI_GDP | STI_ID_1,
39 STI_GDP_2 = STI_GDP | STI_ID_2,
40 STI_GDP_3 = STI_GDP | STI_ID_3,
41 STI_VID_0 = STI_VID | STI_ID_0,
42 STI_VID_1 = STI_VID | STI_ID_1,
43 STI_HQVDP_0 = STI_VDP | STI_ID_0,
44 STI_CURSOR = STI_CUR,
45 STI_BACK = STI_BCK
46};
47
48/**
49 * STI layer functions structure
50 *
51 * @get_formats: get layer supported formats
52 * @get_nb_formats: get number of format supported
53 * @init: initialize the layer
54 * @prepare: prepare layer before rendering
55 * @commit: set layer for rendering
56 * @disable: disable layer
57 */
58struct sti_layer_funcs {
59 const uint32_t* (*get_formats)(struct sti_layer *layer);
60 unsigned int (*get_nb_formats)(struct sti_layer *layer);
61 void (*init)(struct sti_layer *layer);
62 int (*prepare)(struct sti_layer *layer, bool first_prepare);
63 int (*commit)(struct sti_layer *layer);
64 int (*disable)(struct sti_layer *layer);
65};
66
67/**
68 * STI layer structure
69 *
70 * @plane: drm plane it is bound to (if any)
71 * @fb: drm fb it is bound to
72 * @crtc: crtc it is bound to
73 * @mode: display mode
74 * @desc: layer type & id
75 * @device: driver device
76 * @regs: layer registers
77 * @ops: layer functions
78 * @zorder: layer z-order
79 * @mixer_id: id of the mixer used to display the layer
80 * @enabled: to know if the layer is active or not
81 * @src_x src_y: coordinates of the input (fb) area
82 * @src_w src_h: size of the input (fb) area
83 * @dst_x dst_y: coordinates of the output (crtc) area
84 * @dst_w dst_h: size of the output (crtc) area
85 * @format: format
86 * @pitches: pitch of 'planes' (eg: Y, U, V)
87 * @offsets: offset of 'planes'
88 * @vaddr: virtual address of the input buffer
89 * @paddr: physical address of the input buffer
90 */
91struct sti_layer {
92 struct drm_plane plane;
93 struct drm_framebuffer *fb;
94 struct drm_crtc *crtc;
95 struct drm_display_mode *mode;
96 enum sti_layer_desc desc;
97 struct device *dev;
98 void __iomem *regs;
99 const struct sti_layer_funcs *ops;
100 int zorder;
101 int mixer_id;
102 bool enabled;
103 int src_x, src_y;
104 int src_w, src_h;
105 int dst_x, dst_y;
106 int dst_w, dst_h;
107 uint32_t format;
108 unsigned int pitches[4];
109 unsigned int offsets[4];
110 void *vaddr;
111 dma_addr_t paddr;
112};
113
114struct sti_layer *sti_layer_create(struct device *dev, int desc,
115 void __iomem *baseaddr);
116int sti_layer_prepare(struct sti_layer *layer,
117 struct drm_crtc *crtc,
118 struct drm_framebuffer *fb,
119 struct drm_display_mode *mode,
120 int mixer_id,
121 int dest_x, int dest_y,
122 int dest_w, int dest_h,
123 int src_x, int src_y,
124 int src_w, int src_h);
125int sti_layer_commit(struct sti_layer *layer);
126int sti_layer_disable(struct sti_layer *layer);
127const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
128unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
129const char *sti_layer_to_str(struct sti_layer *layer);
130
131#endif
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 13a4b84deab6..0182e9365004 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -58,6 +58,7 @@ const char *sti_mixer_to_str(struct sti_mixer *mixer)
58 return "<UNKNOWN MIXER>"; 58 return "<UNKNOWN MIXER>";
59 } 59 }
60} 60}
61EXPORT_SYMBOL(sti_mixer_to_str);
61 62
62static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id) 63static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
63{ 64{
@@ -101,52 +102,57 @@ static void sti_mixer_set_background_area(struct sti_mixer *mixer,
101 sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds); 102 sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
102} 103}
103 104
104int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer) 105int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane)
105{ 106{
106 int layer_id = 0, depth = layer->zorder; 107 int plane_id, depth = plane->zorder;
108 unsigned int i;
107 u32 mask, val; 109 u32 mask, val;
108 110
109 if (depth >= GAM_MIXER_NB_DEPTH_LEVEL) 111 if ((depth < 1) || (depth > GAM_MIXER_NB_DEPTH_LEVEL))
110 return 1; 112 return 1;
111 113
112 switch (layer->desc) { 114 switch (plane->desc) {
113 case STI_GDP_0: 115 case STI_GDP_0:
114 layer_id = GAM_DEPTH_GDP0_ID; 116 plane_id = GAM_DEPTH_GDP0_ID;
115 break; 117 break;
116 case STI_GDP_1: 118 case STI_GDP_1:
117 layer_id = GAM_DEPTH_GDP1_ID; 119 plane_id = GAM_DEPTH_GDP1_ID;
118 break; 120 break;
119 case STI_GDP_2: 121 case STI_GDP_2:
120 layer_id = GAM_DEPTH_GDP2_ID; 122 plane_id = GAM_DEPTH_GDP2_ID;
121 break; 123 break;
122 case STI_GDP_3: 124 case STI_GDP_3:
123 layer_id = GAM_DEPTH_GDP3_ID; 125 plane_id = GAM_DEPTH_GDP3_ID;
124 break; 126 break;
125 case STI_VID_0:
126 case STI_HQVDP_0: 127 case STI_HQVDP_0:
127 layer_id = GAM_DEPTH_VID0_ID; 128 plane_id = GAM_DEPTH_VID0_ID;
128 break;
129 case STI_VID_1:
130 layer_id = GAM_DEPTH_VID1_ID;
131 break; 129 break;
132 case STI_CURSOR: 130 case STI_CURSOR:
133 /* no need to set depth for cursor */ 131 /* no need to set depth for cursor */
134 return 0; 132 return 0;
135 default: 133 default:
136 DRM_ERROR("Unknown layer %d\n", layer->desc); 134 DRM_ERROR("Unknown plane %d\n", plane->desc);
137 return 1; 135 return 1;
138 } 136 }
139 mask = GAM_DEPTH_MASK_ID << (3 * depth); 137
140 layer_id = layer_id << (3 * depth); 138 /* Search if a previous depth was already assigned to the plane */
139 val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
140 for (i = 0; i < GAM_MIXER_NB_DEPTH_LEVEL; i++) {
141 mask = GAM_DEPTH_MASK_ID << (3 * i);
142 if ((val & mask) == plane_id << (3 * i))
143 break;
144 }
145
146 mask |= GAM_DEPTH_MASK_ID << (3 * (depth - 1));
147 plane_id = plane_id << (3 * (depth - 1));
141 148
142 DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer), 149 DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
143 sti_layer_to_str(layer), depth); 150 sti_plane_to_str(plane), depth);
144 dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n", 151 dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
145 layer_id, mask); 152 plane_id, mask);
146 153
147 val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
148 val &= ~mask; 154 val &= ~mask;
149 val |= layer_id; 155 val |= plane_id;
150 sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val); 156 sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
151 157
152 dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n", 158 dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
@@ -176,9 +182,9 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
176 return 0; 182 return 0;
177} 183}
178 184
179static u32 sti_mixer_get_layer_mask(struct sti_layer *layer) 185static u32 sti_mixer_get_plane_mask(struct sti_plane *plane)
180{ 186{
181 switch (layer->desc) { 187 switch (plane->desc) {
182 case STI_BACK: 188 case STI_BACK:
183 return GAM_CTL_BACK_MASK; 189 return GAM_CTL_BACK_MASK;
184 case STI_GDP_0: 190 case STI_GDP_0:
@@ -189,11 +195,8 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
189 return GAM_CTL_GDP2_MASK; 195 return GAM_CTL_GDP2_MASK;
190 case STI_GDP_3: 196 case STI_GDP_3:
191 return GAM_CTL_GDP3_MASK; 197 return GAM_CTL_GDP3_MASK;
192 case STI_VID_0:
193 case STI_HQVDP_0: 198 case STI_HQVDP_0:
194 return GAM_CTL_VID0_MASK; 199 return GAM_CTL_VID0_MASK;
195 case STI_VID_1:
196 return GAM_CTL_VID1_MASK;
197 case STI_CURSOR: 200 case STI_CURSOR:
198 return GAM_CTL_CURSOR_MASK; 201 return GAM_CTL_CURSOR_MASK;
199 default: 202 default:
@@ -201,17 +204,17 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
201 } 204 }
202} 205}
203 206
204int sti_mixer_set_layer_status(struct sti_mixer *mixer, 207int sti_mixer_set_plane_status(struct sti_mixer *mixer,
205 struct sti_layer *layer, bool status) 208 struct sti_plane *plane, bool status)
206{ 209{
207 u32 mask, val; 210 u32 mask, val;
208 211
209 DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable", 212 DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
210 sti_mixer_to_str(mixer), sti_layer_to_str(layer)); 213 sti_mixer_to_str(mixer), sti_plane_to_str(plane));
211 214
212 mask = sti_mixer_get_layer_mask(layer); 215 mask = sti_mixer_get_plane_mask(plane);
213 if (!mask) { 216 if (!mask) {
214 DRM_ERROR("Can not find layer mask\n"); 217 DRM_ERROR("Can't find plane mask\n");
215 return -EINVAL; 218 return -EINVAL;
216 } 219 }
217 220
@@ -223,15 +226,6 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer,
223 return 0; 226 return 0;
224} 227}
225 228
226void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
227{
228 u32 val;
229
230 DRM_DEBUG_DRIVER("%s clear all layer\n", sti_mixer_to_str(mixer));
231 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
232 sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
233}
234
235void sti_mixer_set_matrix(struct sti_mixer *mixer) 229void sti_mixer_set_matrix(struct sti_mixer *mixer)
236{ 230{
237 unsigned int i; 231 unsigned int i;
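The rewritten sti_mixer_set_plane_depth() above programs the cross-bar register (GAM_MIXER_CRB) with one 3-bit slot per depth level: it first looks for a slot that already holds the plane, clears that slot together with the requested one, then writes the plane id at depth - 1. A stand-alone sketch of the same register arithmetic, with placeholder values instead of the real GAM_DEPTH_* constants:

/*
 * Illustration of the CRB update; DEPTH_MASK_ID and the plane id used in
 * main() are assumed placeholder values, not the driver's constants.
 */
#include <stdint.h>
#include <stdio.h>

#define NB_DEPTH_LEVEL	6
#define DEPTH_MASK_ID	0x7	/* one 3-bit slot */

static uint32_t set_plane_depth(uint32_t crb, uint32_t plane_id, int depth)
{
	uint32_t mask = 0;
	int i;

	/* clear the slot (if any) that already holds this plane ... */
	for (i = 0; i < NB_DEPTH_LEVEL; i++) {
		mask = DEPTH_MASK_ID << (3 * i);
		if ((crb & mask) == plane_id << (3 * i))
			break;
	}

	/* ... together with the requested slot, then install the plane */
	mask |= DEPTH_MASK_ID << (3 * (depth - 1));
	crb &= ~mask;
	crb |= plane_id << (3 * (depth - 1));
	return crb;
}

int main(void)
{
	uint32_t crb = 0;

	crb = set_plane_depth(crb, 0x2, 1);	/* plane 0x2 at depth 1 */
	crb = set_plane_depth(crb, 0x2, 4);	/* move it to depth 4 */
	printf("CRB = 0x%08x\n", crb);		/* 0x00000400: only slot 3 set */
	return 0;
}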
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index b97282182908..efb1a9a5ba86 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -11,10 +11,16 @@
11 11
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13 13
14#include "sti_layer.h" 14#include "sti_plane.h"
15 15
16#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc) 16#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
17 17
18enum sti_mixer_status {
19 STI_MIXER_READY,
20 STI_MIXER_DISABLING,
21 STI_MIXER_DISABLED,
22};
23
18/** 24/**
19 * STI Mixer subdevice structure 25 * STI Mixer subdevice structure
20 * 26 *
@@ -23,33 +29,32 @@
23 * @id: id of the mixer 29 * @id: id of the mixer
24 * @drm_crtc: crtc object link to the mixer 30 * @drm_crtc: crtc object link to the mixer
25 * @pending_event: set if a flip event is pending on crtc 31 * @pending_event: set if a flip event is pending on crtc
26 * @enabled: to know if the mixer is active or not 32 * @status: current status of the mixer
27 */ 33 */
28struct sti_mixer { 34struct sti_mixer {
29 struct device *dev; 35 struct device *dev;
30 void __iomem *regs; 36 void __iomem *regs;
31 int id; 37 int id;
32 struct drm_crtc drm_crtc; 38 struct drm_crtc drm_crtc;
33 struct drm_pending_vblank_event *pending_event; 39 struct drm_pending_vblank_event *pending_event;
34 bool enabled; 40 enum sti_mixer_status status;
35}; 41};
36 42
37const char *sti_mixer_to_str(struct sti_mixer *mixer); 43const char *sti_mixer_to_str(struct sti_mixer *mixer);
38 44
39struct sti_mixer *sti_mixer_create(struct device *dev, int id, 45struct sti_mixer *sti_mixer_create(struct device *dev, int id,
40 void __iomem *baseaddr); 46 void __iomem *baseaddr);
41 47
42int sti_mixer_set_layer_status(struct sti_mixer *mixer, 48int sti_mixer_set_plane_status(struct sti_mixer *mixer,
43 struct sti_layer *layer, bool status); 49 struct sti_plane *plane, bool status);
44void sti_mixer_clear_all_layers(struct sti_mixer *mixer); 50int sti_mixer_set_plane_depth(struct sti_mixer *mixer, struct sti_plane *plane);
45int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
46int sti_mixer_active_video_area(struct sti_mixer *mixer, 51int sti_mixer_active_video_area(struct sti_mixer *mixer,
47 struct drm_display_mode *mode); 52 struct drm_display_mode *mode);
48 53
49void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable); 54void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
50 55
51/* depth in Cross-bar control = z order */ 56/* depth in Cross-bar control = z order */
52#define GAM_MIXER_NB_DEPTH_LEVEL 7 57#define GAM_MIXER_NB_DEPTH_LEVEL 6
53 58
54#define STI_MIXER_MAIN 0 59#define STI_MIXER_MAIN 0
55#define STI_MIXER_AUX 1 60#define STI_MIXER_AUX 1
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
new file mode 100644
index 000000000000..d5c5e91f2956
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -0,0 +1,122 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8
9#include <drm/drmP.h>
10#include <drm/drm_fb_cma_helper.h>
11#include <drm/drm_gem_cma_helper.h>
12
13#include "sti_compositor.h"
14#include "sti_drv.h"
15#include "sti_plane.h"
16
17/* (Background) < GDP0 < GDP1 < HQVDP0 < GDP2 < GDP3 < (ForeGround) */
18enum sti_plane_desc sti_plane_default_zorder[] = {
19 STI_GDP_0,
20 STI_GDP_1,
21 STI_HQVDP_0,
22 STI_GDP_2,
23 STI_GDP_3,
24};
25
26const char *sti_plane_to_str(struct sti_plane *plane)
27{
28 switch (plane->desc) {
29 case STI_GDP_0:
30 return "GDP0";
31 case STI_GDP_1:
32 return "GDP1";
33 case STI_GDP_2:
34 return "GDP2";
35 case STI_GDP_3:
36 return "GDP3";
37 case STI_HQVDP_0:
38 return "HQVDP0";
39 case STI_CURSOR:
40 return "CURSOR";
41 default:
42 return "<UNKNOWN PLANE>";
43 }
44}
45EXPORT_SYMBOL(sti_plane_to_str);
46
47static void sti_plane_destroy(struct drm_plane *drm_plane)
48{
49 DRM_DEBUG_DRIVER("\n");
50
51 drm_plane_helper_disable(drm_plane);
52 drm_plane_cleanup(drm_plane);
53}
54
55static int sti_plane_set_property(struct drm_plane *drm_plane,
56 struct drm_property *property,
57 uint64_t val)
58{
59 struct drm_device *dev = drm_plane->dev;
60 struct sti_private *private = dev->dev_private;
61 struct sti_plane *plane = to_sti_plane(drm_plane);
62
63 DRM_DEBUG_DRIVER("\n");
64
65 if (property == private->plane_zorder_property) {
66 plane->zorder = val;
67 return 0;
68 }
69
70 return -EINVAL;
71}
72
73static void sti_plane_attach_zorder_property(struct drm_plane *drm_plane)
74{
75 struct drm_device *dev = drm_plane->dev;
76 struct sti_private *private = dev->dev_private;
77 struct sti_plane *plane = to_sti_plane(drm_plane);
78 struct drm_property *prop;
79
80 prop = private->plane_zorder_property;
81 if (!prop) {
82 prop = drm_property_create_range(dev, 0, "zpos", 1,
83 GAM_MIXER_NB_DEPTH_LEVEL);
84 if (!prop)
85 return;
86
87 private->plane_zorder_property = prop;
88 }
89
90 drm_object_attach_property(&drm_plane->base, prop, plane->zorder);
91}
92
93void sti_plane_init_property(struct sti_plane *plane,
94 enum drm_plane_type type)
95{
96 unsigned int i;
97
98 for (i = 0; i < ARRAY_SIZE(sti_plane_default_zorder); i++)
99 if (sti_plane_default_zorder[i] == plane->desc)
100 break;
101
102 plane->zorder = i + 1;
103
104 if (type == DRM_PLANE_TYPE_OVERLAY)
105 sti_plane_attach_zorder_property(&plane->drm_plane);
106
107 DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%d\n",
108 plane->drm_plane.base.id,
109 sti_plane_to_str(plane), plane->zorder);
110}
111EXPORT_SYMBOL(sti_plane_init_property);
112
113struct drm_plane_funcs sti_plane_helpers_funcs = {
114 .update_plane = drm_atomic_helper_update_plane,
115 .disable_plane = drm_atomic_helper_disable_plane,
116 .destroy = sti_plane_destroy,
117 .set_property = sti_plane_set_property,
118 .reset = drm_atomic_helper_plane_reset,
119 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
120 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
121};
122EXPORT_SYMBOL(sti_plane_helpers_funcs);
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
new file mode 100644
index 000000000000..86f1e6fc81b9
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -0,0 +1,71 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_PLANE_H_
8#define _STI_PLANE_H_
9
10#include <drm/drmP.h>
11#include <drm/drm_atomic_helper.h>
12#include <drm/drm_plane_helper.h>
13
14extern struct drm_plane_funcs sti_plane_helpers_funcs;
15
16#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
17
18#define STI_PLANE_TYPE_SHIFT 8
19#define STI_PLANE_TYPE_MASK (~((1 << STI_PLANE_TYPE_SHIFT) - 1))
20
21enum sti_plane_type {
22 STI_GDP = 1 << STI_PLANE_TYPE_SHIFT,
23 STI_VDP = 2 << STI_PLANE_TYPE_SHIFT,
24 STI_CUR = 3 << STI_PLANE_TYPE_SHIFT,
25 STI_BCK = 4 << STI_PLANE_TYPE_SHIFT
26};
27
28enum sti_plane_id_of_type {
29 STI_ID_0 = 0,
30 STI_ID_1 = 1,
31 STI_ID_2 = 2,
32 STI_ID_3 = 3
33};
34
35enum sti_plane_desc {
36 STI_GDP_0 = STI_GDP | STI_ID_0,
37 STI_GDP_1 = STI_GDP | STI_ID_1,
38 STI_GDP_2 = STI_GDP | STI_ID_2,
39 STI_GDP_3 = STI_GDP | STI_ID_3,
40 STI_HQVDP_0 = STI_VDP | STI_ID_0,
41 STI_CURSOR = STI_CUR,
42 STI_BACK = STI_BCK
43};
44
45enum sti_plane_status {
46 STI_PLANE_READY,
47 STI_PLANE_UPDATED,
48 STI_PLANE_DISABLING,
49 STI_PLANE_FLUSHING,
50 STI_PLANE_DISABLED,
51};
52
53/**
54 * STI plane structure
55 *
56 * @drm_plane: drm plane it is bound to (if any)
57 * @desc: plane type & id
58 * @status: current status of the plane
59 * @zorder: plane z-order
60 */
61struct sti_plane {
62 struct drm_plane drm_plane;
63 enum sti_plane_desc desc;
64 enum sti_plane_status status;
65 int zorder;
66};
67
68const char *sti_plane_to_str(struct sti_plane *plane);
69void sti_plane_init_property(struct sti_plane *plane,
70 enum drm_plane_type type);
71#endif
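The new sti_plane_desc above keeps the old sti_layer encoding: the plane type lives in the bits at and above STI_PLANE_TYPE_SHIFT, the instance id in the low bits, and STI_PLANE_TYPE_MASK recovers the type. A small stand-alone illustration (shortened names, not the driver's identifiers):

#include <stdio.h>

#define TYPE_SHIFT	8
#define TYPE_MASK	(~((1 << TYPE_SHIFT) - 1))

enum type { GDP = 1 << TYPE_SHIFT, VDP = 2 << TYPE_SHIFT, CUR = 3 << TYPE_SHIFT };
enum desc { GDP_2 = GDP | 2, HQVDP_0 = VDP | 0, CURSOR = CUR };

int main(void)
{
	enum desc d = GDP_2;

	/* type = 0x100 (GDP), id = 2 */
	printf("type=0x%x id=%d\n", d & TYPE_MASK, d & ~TYPE_MASK);
	return 0;
}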
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 5cc53116508e..c1aac8e66fb5 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -16,7 +16,7 @@
16#include <drm/drmP.h> 16#include <drm/drmP.h>
17#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
18 18
19#include "sti_drm_crtc.h" 19#include "sti_crtc.h"
20 20
21/* glue registers */ 21/* glue registers */
22#define TVO_CSC_MAIN_M0 0x000 22#define TVO_CSC_MAIN_M0 0x000
@@ -473,7 +473,7 @@ static void sti_dvo_encoder_commit(struct drm_encoder *encoder)
473{ 473{
474 struct sti_tvout *tvout = to_sti_tvout(encoder); 474 struct sti_tvout *tvout = to_sti_tvout(encoder);
475 475
476 tvout_dvo_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); 476 tvout_dvo_start(tvout, sti_crtc_is_main(encoder->crtc));
477} 477}
478 478
479static void sti_dvo_encoder_disable(struct drm_encoder *encoder) 479static void sti_dvo_encoder_disable(struct drm_encoder *encoder)
@@ -523,7 +523,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder)
523{ 523{
524 struct sti_tvout *tvout = to_sti_tvout(encoder); 524 struct sti_tvout *tvout = to_sti_tvout(encoder);
525 525
526 tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); 526 tvout_hda_start(tvout, sti_crtc_is_main(encoder->crtc));
527} 527}
528 528
529static void sti_hda_encoder_disable(struct drm_encoder *encoder) 529static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@@ -575,7 +575,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
575{ 575{
576 struct sti_tvout *tvout = to_sti_tvout(encoder); 576 struct sti_tvout *tvout = to_sti_tvout(encoder);
577 577
578 tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc)); 578 tvout_hdmi_start(tvout, sti_crtc_is_main(encoder->crtc));
579} 579}
580 580
581static void sti_hdmi_encoder_disable(struct drm_encoder *encoder) 581static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@@ -644,7 +644,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
644 struct sti_tvout *tvout = dev_get_drvdata(dev); 644 struct sti_tvout *tvout = dev_get_drvdata(dev);
645 struct drm_device *drm_dev = data; 645 struct drm_device *drm_dev = data;
646 unsigned int i; 646 unsigned int i;
647 int ret;
648 647
649 tvout->drm_dev = drm_dev; 648 tvout->drm_dev = drm_dev;
650 649
@@ -658,17 +657,15 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
658 657
659 sti_tvout_create_encoders(drm_dev, tvout); 658 sti_tvout_create_encoders(drm_dev, tvout);
660 659
661 ret = component_bind_all(dev, drm_dev); 660 return 0;
662 if (ret)
663 sti_tvout_destroy_encoders(tvout);
664
665 return ret;
666} 661}
667 662
668static void sti_tvout_unbind(struct device *dev, struct device *master, 663static void sti_tvout_unbind(struct device *dev, struct device *master,
669 void *data) 664 void *data)
670{ 665{
671 /* do nothing */ 666 struct sti_tvout *tvout = dev_get_drvdata(dev);
667
668 sti_tvout_destroy_encoders(tvout);
672} 669}
673 670
674static const struct component_ops sti_tvout_ops = { 671static const struct component_ops sti_tvout_ops = {
@@ -676,34 +673,12 @@ static const struct component_ops sti_tvout_ops = {
676 .unbind = sti_tvout_unbind, 673 .unbind = sti_tvout_unbind,
677}; 674};
678 675
679static int compare_of(struct device *dev, void *data)
680{
681 return dev->of_node == data;
682}
683
684static int sti_tvout_master_bind(struct device *dev)
685{
686 return 0;
687}
688
689static void sti_tvout_master_unbind(struct device *dev)
690{
691 /* do nothing */
692}
693
694static const struct component_master_ops sti_tvout_master_ops = {
695 .bind = sti_tvout_master_bind,
696 .unbind = sti_tvout_master_unbind,
697};
698
699static int sti_tvout_probe(struct platform_device *pdev) 676static int sti_tvout_probe(struct platform_device *pdev)
700{ 677{
701 struct device *dev = &pdev->dev; 678 struct device *dev = &pdev->dev;
702 struct device_node *node = dev->of_node; 679 struct device_node *node = dev->of_node;
703 struct sti_tvout *tvout; 680 struct sti_tvout *tvout;
704 struct resource *res; 681 struct resource *res;
705 struct device_node *child_np;
706 struct component_match *match = NULL;
707 682
708 DRM_INFO("%s\n", __func__); 683 DRM_INFO("%s\n", __func__);
709 684
@@ -734,24 +709,11 @@ static int sti_tvout_probe(struct platform_device *pdev)
734 709
735 platform_set_drvdata(pdev, tvout); 710 platform_set_drvdata(pdev, tvout);
736 711
737 of_platform_populate(node, NULL, NULL, dev);
738
739 child_np = of_get_next_available_child(node, NULL);
740
741 while (child_np) {
742 component_match_add(dev, &match, compare_of, child_np);
743 of_node_put(child_np);
744 child_np = of_get_next_available_child(node, child_np);
745 }
746
747 component_master_add_with_match(dev, &sti_tvout_master_ops, match);
748
749 return component_add(dev, &sti_tvout_ops); 712 return component_add(dev, &sti_tvout_ops);
750} 713}
751 714
752static int sti_tvout_remove(struct platform_device *pdev) 715static int sti_tvout_remove(struct platform_device *pdev)
753{ 716{
754 component_master_del(&pdev->dev, &sti_tvout_master_ops);
755 component_del(&pdev->dev, &sti_tvout_ops); 717 component_del(&pdev->dev, &sti_tvout_ops);
756 return 0; 718 return 0;
757} 719}
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 10ced6a479f4..a8254cc362a1 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -6,7 +6,7 @@
6 6
7#include <drm/drmP.h> 7#include <drm/drmP.h>
8 8
9#include "sti_layer.h" 9#include "sti_plane.h"
10#include "sti_vid.h" 10#include "sti_vid.h"
11#include "sti_vtg.h" 11#include "sti_vtg.h"
12 12
@@ -43,35 +43,37 @@
43#define VID_MPR2_BT709 0x07150545 43#define VID_MPR2_BT709 0x07150545
44#define VID_MPR3_BT709 0x00000AE8 44#define VID_MPR3_BT709 0x00000AE8
45 45
46static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare) 46void sti_vid_commit(struct sti_vid *vid,
47 struct drm_plane_state *state)
47{ 48{
48 u32 val; 49 struct drm_crtc *crtc = state->crtc;
50 struct drm_display_mode *mode = &crtc->mode;
51 int dst_x = state->crtc_x;
52 int dst_y = state->crtc_y;
53 int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
54 int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
55 u32 val, ydo, xdo, yds, xds;
56
57 /* Input / output size
58 * Align to upper even value */
59 dst_w = ALIGN(dst_w, 2);
60 dst_h = ALIGN(dst_h, 2);
49 61
50 /* Unmask */ 62 /* Unmask */
51 val = readl(vid->regs + VID_CTL); 63 val = readl(vid->regs + VID_CTL);
52 val &= ~VID_CTL_IGNORE; 64 val &= ~VID_CTL_IGNORE;
53 writel(val, vid->regs + VID_CTL); 65 writel(val, vid->regs + VID_CTL);
54 66
55 return 0; 67 ydo = sti_vtg_get_line_number(*mode, dst_y);
56} 68 yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
57 69 xdo = sti_vtg_get_pixel_number(*mode, dst_x);
58static int sti_vid_commit_layer(struct sti_layer *vid) 70 xds = sti_vtg_get_pixel_number(*mode, dst_x + dst_w - 1);
59{
60 struct drm_display_mode *mode = vid->mode;
61 u32 ydo, xdo, yds, xds;
62
63 ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
64 yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
65 xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
66 xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
67 71
68 writel((ydo << 16) | xdo, vid->regs + VID_VPO); 72 writel((ydo << 16) | xdo, vid->regs + VID_VPO);
69 writel((yds << 16) | xds, vid->regs + VID_VPS); 73 writel((yds << 16) | xds, vid->regs + VID_VPS);
70
71 return 0;
72} 74}
73 75
74static int sti_vid_disable_layer(struct sti_layer *vid) 76void sti_vid_disable(struct sti_vid *vid)
75{ 77{
76 u32 val; 78 u32 val;
77 79
@@ -79,21 +81,9 @@ static int sti_vid_disable_layer(struct sti_layer *vid)
79 val = readl(vid->regs + VID_CTL); 81 val = readl(vid->regs + VID_CTL);
80 val |= VID_CTL_IGNORE; 82 val |= VID_CTL_IGNORE;
81 writel(val, vid->regs + VID_CTL); 83 writel(val, vid->regs + VID_CTL);
82
83 return 0;
84} 84}
85 85
86static const uint32_t *sti_vid_get_formats(struct sti_layer *layer) 86static void sti_vid_init(struct sti_vid *vid)
87{
88 return NULL;
89}
90
91static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
92{
93 return 0;
94}
95
96static void sti_vid_init(struct sti_layer *vid)
97{ 87{
98 /* Enable PSI, Mask layer */ 88 /* Enable PSI, Mask layer */
99 writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL); 89 writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
@@ -113,18 +103,10 @@ static void sti_vid_init(struct sti_layer *vid)
113 writel(VID_CSAT_DFLT, vid->regs + VID_CSAT); 103 writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
114} 104}
115 105
116static const struct sti_layer_funcs vid_ops = { 106struct sti_vid *sti_vid_create(struct device *dev, int id,
117 .get_formats = sti_vid_get_formats, 107 void __iomem *baseaddr)
118 .get_nb_formats = sti_vid_get_nb_formats,
119 .init = sti_vid_init,
120 .prepare = sti_vid_prepare_layer,
121 .commit = sti_vid_commit_layer,
122 .disable = sti_vid_disable_layer,
123};
124
125struct sti_layer *sti_vid_create(struct device *dev)
126{ 108{
127 struct sti_layer *vid; 109 struct sti_vid *vid;
128 110
129 vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL); 111 vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
130 if (!vid) { 112 if (!vid) {
@@ -132,7 +114,11 @@ struct sti_layer *sti_vid_create(struct device *dev)
132 return NULL; 114 return NULL;
133 } 115 }
134 116
135 vid->ops = &vid_ops; 117 vid->dev = dev;
118 vid->regs = baseaddr;
119 vid->id = id;
120
121 sti_vid_init(vid);
136 122
137 return vid; 123 return vid;
138} 124}
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 2c0aecd63294..5dea4791f1d6 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -7,6 +7,23 @@
7#ifndef _STI_VID_H_ 7#ifndef _STI_VID_H_
8#define _STI_VID_H_ 8#define _STI_VID_H_
9 9
10struct sti_layer *sti_vid_create(struct device *dev); 10/**
11 * STI VID structure
12 *
13 * @dev: driver device
14 * @regs: vid registers
15 * @id: id of the vid
16 */
17struct sti_vid {
18 struct device *dev;
19 void __iomem *regs;
20 int id;
21};
22
23void sti_vid_commit(struct sti_vid *vid,
24 struct drm_plane_state *state);
25void sti_vid_disable(struct sti_vid *vid);
26struct sti_vid *sti_vid_create(struct device *dev, int id,
27 void __iomem *baseaddr);
11 28
12#endif 29#endif
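For completeness, the viewport maths in the new sti_vid_commit() above: the destination rectangle is clamped to the CRTC active area, rounded up to even width/height, and the two corners are packed as (y << 16) | x into VID_VPO/VID_VPS. A stand-alone sketch with the sti_vtg_get_line_number()/sti_vtg_get_pixel_number() offsets left out; ALIGN_EVEN and CLAMP below stand in for the kernel's ALIGN() and clamp_val():

#include <stdint.h>
#include <stdio.h>

#define ALIGN_EVEN(v)		(((v) + 1) & ~1)	/* round up to even */
#define CLAMP(v, lo, hi)	((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

int main(void)
{
	int hdisplay = 1280, vdisplay = 720;		/* CRTC mode */
	int dst_x = 100, dst_y = 50, dst_w = 1201, dst_h = 675;
	uint32_t vpo, vps;

	dst_w = ALIGN_EVEN(CLAMP(dst_w, 0, hdisplay - dst_x));
	dst_h = ALIGN_EVEN(CLAMP(dst_h, 0, vdisplay - dst_y));

	vpo = (uint32_t)dst_y << 16 | dst_x;				  /* top-left     */
	vps = (uint32_t)(dst_y + dst_h - 1) << 16 | (dst_x + dst_w - 1); /* bottom-right */

	printf("VPO=0x%08x VPS=0x%08x (%dx%d)\n", vpo, vps, dst_w, dst_h);
	return 0;
}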