author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-15 18:52:01 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-15 18:52:01 -0500
commit	988adfdffdd43cfd841df734664727993076d7cb (patch)
tree	6794f7bba8f595500c2b7d33376ad6614adcfaf2 /drivers/gpu/drm/msm/mdp
parent	26178ec11ef3c6c814bf16a0a2b9c2f7242e3c64 (diff)
parent	4e0cd68115620bc3236ff4e58e4c073948629b41 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "Highlights:

  - AMD KFD driver merge

    This is the AMD HSA interface for exposing a lowlevel interface for
    GPGPU use.  They have an open source userspace built on top of this
    interface, and the code looks as good as it was going to get out of
    tree.

  - Initial atomic modesetting work

    The need for an atomic modesetting interface to allow userspace to
    try and send a complete set of modesetting state to the driver has
    arisen, and been suffering from neglect this past year.  No more,
    the start of the common code and changes for msm driver to use it
    are in this tree.  Ongoing work to get the userspace ioctl finished
    and the code clean will probably wait until next kernel.

  - DisplayID 1.3 and tiled monitor exposed to userspace.

    Tiled monitor property is now exposed for userspace to make use of.

  - Rockchip drm driver merged.

  - imx gpu driver moved out of staging

  Other stuff:

  - core:
      panel - MIPI DSI + new panels.
      expose suggested x/y properties for virtual GPUs

  - i915:
      Initial Skylake (SKL) support
      gen3/4 reset work
      start of dri1/ums removal
      infoframe tracking
      fixes for lots of things.

  - nouveau:
      tegra k1 voltage support
      GM204 modesetting support
      GT21x memory reclocking work

  - radeon:
      CI dpm fixes
      GPUVM improvements
      Initial DPM fan control

  - rcar-du:
      HDMI support added
      removed some support for old boards
      slave encoder driver for Analog Devices adv7511

  - exynos:
      Exynos4415 SoC support

  - msm:
      a4xx gpu support
      atomic helper conversion

  - tegra:
      iommu support
      universal plane support
      ganged-mode DSI support

  - sti:
      HDMI i2c improvements

  - vmwgfx:
      some late fixes.

  - qxl:
      use suggested x/y properties"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (969 commits)
  drm: sti: fix module compilation issue
  drm/i915: save/restore GMBUS freq across suspend/resume on gen4
  drm: sti: correctly cleanup CRTC and planes
  drm: sti: add HQVDP plane
  drm: sti: add cursor plane
  drm: sti: enable auxiliary CRTC
  drm: sti: fix delay in VTG programming
  drm: sti: prepare sti_tvout to support auxiliary crtc
  drm: sti: use drm_crtc_vblank_{on/off} instead of drm_vblank_{on/off}
  drm: sti: fix hdmi avi infoframe
  drm: sti: remove event lock while disabling vblank
  drm: sti: simplify gdp code
  drm: sti: clear all mixer control
  drm: sti: remove gpio for HDMI hot plug detection
  drm: sti: allow to change hdmi ddc i2c adapter
  drm/doc: Document drm_add_modes_noedid() usage
  drm/i915: Remove '& 0xffff' from the mask given to WA_REG()
  drm/i915: Invert the mask and val arguments in wa_add() and WA_REG()
  drm: Zero out DRM object memory upon cleanup
  drm/i915/bdw: Fix the write setting up the WIZ hashing mode
  ...
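The msm entry above ("atomic helper conversion") is what the mdp4/mdp5 hunks below implement: the driver's own legacy page-flip/set-config paths are dropped and routed through the DRM atomic helpers, while the hardware-specific work moves into atomic_* callbacks. As orientation for reading the diff, here is a minimal sketch of that wiring, using only symbols that appear in the mdp4_plane.c hunk further down (a sketch, not a buildable driver on its own):

	/* legacy plane ioctls are serviced by the core atomic helpers,
	 * which build and commit an atomic state on the driver's behalf: */
	static const struct drm_plane_funcs funcs = {
		.update_plane = drm_atomic_helper_update_plane,
		.disable_plane = drm_atomic_helper_disable_plane,
		.reset = drm_atomic_helper_plane_reset,
		.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
	};

	/* ...while the driver supplies check/commit-style callbacks:
	 * atomic_check may fail, atomic_update must not: */
	static const struct drm_plane_helper_funcs helper_funcs = {
		.prepare_fb = mdp4_plane_prepare_fb,	/* pin/map the new scanout buffer */
		.cleanup_fb = mdp4_plane_cleanup_fb,	/* unpin the old one after the flip */
		.atomic_check = mdp4_plane_atomic_check,
		.atomic_update = mdp4_plane_atomic_update,
	};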
Diffstat (limited to 'drivers/gpu/drm/msm/mdp')
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h	|    8
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c	|  348
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c	|   17
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h	|   17
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c	|    3
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c	|  121
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h	|   10
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c	|  207
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h	|   91
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c	|  466
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c	|  322
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h	|  122
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c	|   24
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c	|   93
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c	|  273
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h	|  131
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c	|  328
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c	|  241
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h	|   23
19 files changed, 1853 insertions(+), 992 deletions(-)
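Of the new files in this diffstat, mdp5_cfg.c/mdp5_cfg.h introduce a per-SoC hardware description (block counts, register bases, SMP sizing) selected by MDP revision instead of compile-time constants. A hedged usage sketch, based only on the declarations visible in the new files below — the actual caller lives in mdp5_kms.c, which is outside this diffstat, and the major/minor variables are assumed to come from reading the MDP version register:

	struct mdp5_cfg_handler *handler;
	const struct mdp5_cfg_hw *hw;

	/* on an unknown revision, mdp5_cfg_init() below returns NULL: */
	handler = mdp5_cfg_init(mdp5_kms, major, minor);
	if (!handler)
		return -ENXIO;

	hw = mdp5_cfg_get_hw_config(handler);
	DBG("%s: %d CTLs, %d layer mixers, %d MMBs of %d bytes",
			hw->name, hw->ctl.count, hw->lm.count,
			hw->smp.mmb_count, hw->smp.mmb_size);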
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 03c0bd9cd5b9..a4a7f8c7122a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 7d00f7fb5773..a7672e100d8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -25,8 +25,6 @@
 struct mdp4_crtc {
 	struct drm_crtc base;
 	char name[8];
-	struct drm_plane *plane;
-	struct drm_plane *planes[8];
 	int id;
 	int ovlp;
 	enum mdp4_dma dma;
@@ -52,25 +50,11 @@ struct mdp4_crtc {
 
 	/* if there is a pending flip, these will be non-null: */
 	struct drm_pending_vblank_event *event;
-	struct msm_fence_cb pageflip_cb;
 
 #define PENDING_CURSOR 0x1
 #define PENDING_FLIP   0x2
 	atomic_t pending;
 
-	/* the fb that we logically (from PoV of KMS API) hold a ref
-	 * to.  Which we may not yet be scanning out (we may still
-	 * be scanning out previous in case of page_flip while waiting
-	 * for gpu rendering to complete:
-	 */
-	struct drm_framebuffer *fb;
-
-	/* the fb that we currently hold a scanout ref to: */
-	struct drm_framebuffer *scanout_fb;
-
-	/* for unref'ing framebuffers after scanout completes: */
-	struct drm_flip_work unref_fb_work;
-
 	/* for unref'ing cursor bo's after scanout completes: */
 	struct drm_flip_work unref_cursor_work;
 
@@ -97,15 +81,14 @@ static void crtc_flush(struct drm_crtc *crtc)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct mdp4_kms *mdp4_kms = get_kms(crtc);
-	uint32_t i, flush = 0;
+	struct drm_plane *plane;
+	uint32_t flush = 0;
 
-	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
-		struct drm_plane *plane = mdp4_crtc->planes[i];
-		if (plane) {
-			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
-			flush |= pipe2flush(pipe_id);
-		}
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+		flush |= pipe2flush(pipe_id);
 	}
+
 	flush |= ovlp2flush(mdp4_crtc->ovlp);
 
 	DBG("%s: flush=%08x", mdp4_crtc->name, flush);
@@ -113,47 +96,6 @@ static void crtc_flush(struct drm_crtc *crtc)
 	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
 }
 
-static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
-{
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct drm_framebuffer *old_fb = mdp4_crtc->fb;
-
-	/* grab reference to incoming scanout fb: */
-	drm_framebuffer_reference(new_fb);
-	mdp4_crtc->base.primary->fb = new_fb;
-	mdp4_crtc->fb = new_fb;
-
-	if (old_fb)
-		drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
-}
-
-/* unlike update_fb(), take a ref to the new scanout fb *before* updating
- * plane, then call this.  Needed to ensure we don't unref the buffer that
- * is actually still being scanned out.
- *
- * Note that this whole thing goes away with atomic.. since we can defer
- * calling into driver until rendering is done.
- */
-static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
-{
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
-	/* flush updates, to make sure hw is updated to new scanout fb,
-	 * so that we can safely queue unref to current fb (ie. next
-	 * vblank we know hw is done w/ previous scanout_fb).
-	 */
-	crtc_flush(crtc);
-
-	if (mdp4_crtc->scanout_fb)
-		drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
-				mdp4_crtc->scanout_fb);
-
-	mdp4_crtc->scanout_fb = fb;
-
-	/* enable vblank to complete flip: */
-	request_pending(crtc, PENDING_FLIP);
-}
-
 /* if file!=NULL, this is preclose potential cancel-flip path */
 static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 {
@@ -171,38 +113,13 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 	 */
 	if (!file || (event->base.file_priv == file)) {
 		mdp4_crtc->event = NULL;
+		DBG("%s: send event: %p", mdp4_crtc->name, event);
 		drm_send_vblank_event(dev, mdp4_crtc->id, event);
 	}
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
-	struct mdp4_crtc *mdp4_crtc =
-		container_of(cb, struct mdp4_crtc, pageflip_cb);
-	struct drm_crtc *crtc = &mdp4_crtc->base;
-	struct drm_framebuffer *fb = crtc->primary->fb;
-
-	if (!fb)
-		return;
-
-	drm_framebuffer_reference(fb);
-	mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
-	update_scanout(crtc, fb);
-}
-
-static void unref_fb_worker(struct drm_flip_work *work, void *val)
-{
-	struct mdp4_crtc *mdp4_crtc =
-		container_of(work, struct mdp4_crtc, unref_fb_work);
-	struct drm_device *dev = mdp4_crtc->base.dev;
-
-	mutex_lock(&dev->mode_config.mutex);
-	drm_framebuffer_unreference(val);
-	mutex_unlock(&dev->mode_config.mutex);
-}
-
 static void unref_cursor_worker(struct drm_flip_work *work, void *val)
 {
 	struct mdp4_crtc *mdp4_crtc =
@@ -218,7 +135,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 
 	drm_crtc_cleanup(crtc);
-	drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
 	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
 
 	kfree(mdp4_crtc);
@@ -251,57 +167,70 @@ static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-static void blend_setup(struct drm_crtc *crtc)
+/* statically (for now) map planes to mixer stage (z-order): */
+static const int idxs[] = {
+		[VG1]  = 1,
+		[VG2]  = 2,
+		[RGB1] = 0,
+		[RGB2] = 0,
+		[RGB3] = 0,
+		[VG3]  = 3,
+		[VG4]  = 4,
+
+};
+
+/* setup mixer config, for which we need to consider all crtc's and
+ * the planes attached to them
+ *
+ * TODO may possibly need some extra locking here
+ */
+static void setup_mixer(struct mdp4_kms *mdp4_kms)
 {
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct mdp4_kms *mdp4_kms = get_kms(crtc);
-	int i, ovlp = mdp4_crtc->ovlp;
+	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
+	struct drm_crtc *crtc;
 	uint32_t mixer_cfg = 0;
 	static const enum mdp_mixer_stage_id stages[] = {
 			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
 	};
-	/* statically (for now) map planes to mixer stage (z-order): */
-	static const int idxs[] = {
-			[VG1]  = 1,
-			[VG2]  = 2,
-			[RGB1] = 0,
-			[RGB2] = 0,
-			[RGB3] = 0,
-			[VG3]  = 3,
-			[VG4]  = 4,
 
-	};
-	bool alpha[4]= { false, false, false, false };
+	list_for_each_entry(crtc, &config->crtc_list, head) {
+		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+		struct drm_plane *plane;
 
-	/* Don't rely on value read back from hw, but instead use our
-	 * own shadowed value.  Possibly disable/reenable looses the
-	 * previous value and goes back to power-on default?
-	 */
-	mixer_cfg = mdp4_kms->mixer_cfg;
+		drm_atomic_crtc_for_each_plane(plane, crtc) {
+			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+			int idx = idxs[pipe_id];
+			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
+					pipe_id, stages[idx]);
+		}
+	}
+
+	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	struct drm_plane *plane;
+	int i, ovlp = mdp4_crtc->ovlp;
+	bool alpha[4]= { false, false, false, false };
 
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
 
-	for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
-		struct drm_plane *plane = mdp4_crtc->planes[i];
-		if (plane) {
-			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
-			int idx = idxs[pipe_id];
-			if (idx > 0) {
-				const struct mdp_format *format =
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+		int idx = idxs[pipe_id];
+		if (idx > 0) {
+			const struct mdp_format *format =
 					to_mdp_format(msm_framebuffer_format(plane->fb));
-				alpha[idx-1] = format->alpha_enable;
-			}
-			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
-					pipe_id, stages[idx]);
+			alpha[idx-1] = format->alpha_enable;
 		}
 	}
 
-	/* this shouldn't happen.. and seems to cause underflow: */
-	WARN_ON(!mixer_cfg);
-
 	for (i = 0; i < 4; i++) {
 		uint32_t op;
 
@@ -324,22 +253,21 @@ static void blend_setup(struct drm_crtc *crtc)
 		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
 	}
 
-	mdp4_kms->mixer_cfg = mixer_cfg;
-	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+	setup_mixer(mdp4_kms);
 }
 
-static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
-		struct drm_display_mode *mode,
-		struct drm_display_mode *adjusted_mode,
-		int x, int y,
-		struct drm_framebuffer *old_fb)
+static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct mdp4_kms *mdp4_kms = get_kms(crtc);
 	enum mdp4_dma dma = mdp4_crtc->dma;
-	int ret, ovlp = mdp4_crtc->ovlp;
+	int ovlp = mdp4_crtc->ovlp;
+	struct drm_display_mode *mode;
+
+	if (WARN_ON(!crtc->state))
+		return;
 
-	mode = adjusted_mode;
+	mode = &crtc->state->adjusted_mode;
 
 	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
 			mdp4_crtc->name, mode->base.id, mode->name,
@@ -350,28 +278,13 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 
-	/* grab extra ref for update_scanout() */
-	drm_framebuffer_reference(crtc->primary->fb);
-
-	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->primary->fb,
-			0, 0, mode->hdisplay, mode->vdisplay,
-			x << 16, y << 16,
-			mode->hdisplay << 16, mode->vdisplay << 16);
-	if (ret) {
-		drm_framebuffer_unreference(crtc->primary->fb);
-		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
-				mdp4_crtc->name, ret);
-		return ret;
-	}
-
 	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
 			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
 			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
 
 	/* take data from pipe: */
 	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
-	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
-			crtc->primary->fb->pitches[0]);
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
 	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
 			MDP4_DMA_DST_SIZE_WIDTH(0) |
 			MDP4_DMA_DST_SIZE_HEIGHT(0));
@@ -380,8 +293,7 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
 			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
 			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
-	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
-			crtc->primary->fb->pitches[0]);
+	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);
 
 	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
 
@@ -390,11 +302,6 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
 		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
 		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
 	}
-
-	update_fb(crtc, crtc->primary->fb);
-	update_scanout(crtc, crtc->primary->fb);
-
-	return 0;
 }
 
 static void mdp4_crtc_prepare(struct drm_crtc *crtc)
@@ -416,60 +323,51 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc)
 	drm_crtc_vblank_put(crtc);
 }
 
-static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-		struct drm_framebuffer *old_fb)
+static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-	struct drm_plane *plane = mdp4_crtc->plane;
-	struct drm_display_mode *mode = &crtc->mode;
-	int ret;
+	struct drm_device *dev = crtc->dev;
 
-	/* grab extra ref for update_scanout() */
-	drm_framebuffer_reference(crtc->primary->fb);
+	DBG("%s: check", mdp4_crtc->name);
 
-	ret = mdp4_plane_mode_set(plane, crtc, crtc->primary->fb,
-			0, 0, mode->hdisplay, mode->vdisplay,
-			x << 16, y << 16,
-			mode->hdisplay << 16, mode->vdisplay << 16);
-	if (ret) {
-		drm_framebuffer_unreference(crtc->primary->fb);
-		return ret;
+	if (mdp4_crtc->event) {
+		dev_err(dev->dev, "already pending flip!\n");
+		return -EBUSY;
 	}
 
-	update_fb(crtc, crtc->primary->fb);
-	update_scanout(crtc, crtc->primary->fb);
+	// TODO anything else to check?
 
 	return 0;
 }
 
-static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
 {
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	DBG("%s: begin", mdp4_crtc->name);
 }
 
-static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
-		struct drm_framebuffer *new_fb,
-		struct drm_pending_vblank_event *event,
-		uint32_t page_flip_flags)
+static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct drm_gem_object *obj;
 	unsigned long flags;
 
-	if (mdp4_crtc->event) {
-		dev_err(dev->dev, "already pending flip!\n");
-		return -EBUSY;
-	}
+	DBG("%s: flush", mdp4_crtc->name);
 
-	obj = msm_framebuffer_bo(new_fb, 0);
+	WARN_ON(mdp4_crtc->event);
 
 	spin_lock_irqsave(&dev->event_lock, flags);
-	mdp4_crtc->event = event;
+	mdp4_crtc->event = crtc->state->event;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	update_fb(crtc, new_fb);
-
-	return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
+	blend_setup(crtc);
+	crtc_flush(crtc);
+	request_pending(crtc, PENDING_FLIP);
 }
 
 static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -607,22 +505,29 @@ static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 }
 
 static const struct drm_crtc_funcs mdp4_crtc_funcs = {
-	.set_config = drm_crtc_helper_set_config,
+	.set_config = drm_atomic_helper_set_config,
 	.destroy = mdp4_crtc_destroy,
-	.page_flip = mdp4_crtc_page_flip,
+	.page_flip = drm_atomic_helper_page_flip,
 	.set_property = mdp4_crtc_set_property,
 	.cursor_set = mdp4_crtc_cursor_set,
 	.cursor_move = mdp4_crtc_cursor_move,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
 static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
 	.dpms = mdp4_crtc_dpms,
 	.mode_fixup = mdp4_crtc_mode_fixup,
-	.mode_set = mdp4_crtc_mode_set,
+	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
+	.mode_set = drm_helper_crtc_mode_set,
+	.mode_set_base = drm_helper_crtc_mode_set_base,
 	.prepare = mdp4_crtc_prepare,
 	.commit = mdp4_crtc_commit,
-	.mode_set_base = mdp4_crtc_mode_set_base,
 	.load_lut = mdp4_crtc_load_lut,
+	.atomic_check = mdp4_crtc_atomic_check,
+	.atomic_begin = mdp4_crtc_atomic_begin,
+	.atomic_flush = mdp4_crtc_atomic_flush,
 };
 
 static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -638,7 +543,6 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 
 	if (pending & PENDING_FLIP) {
 		complete_flip(crtc, NULL);
-		drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
 	}
 
 	if (pending & PENDING_CURSOR) {
@@ -663,7 +567,8 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
 
 void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
 {
-	DBG("cancel: %p", file);
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	DBG("%s: cancel: %p", mdp4_crtc->name, file);
 	complete_flip(crtc, file);
 }
 
@@ -717,35 +622,6 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
 	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
 }
 
-static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
-		struct drm_plane *plane)
-{
-	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
-	BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
-
-	if (mdp4_crtc->planes[pipe_id] == plane)
-		return;
-
-	mdp4_crtc->planes[pipe_id] = plane;
-	blend_setup(crtc);
-	if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
-		crtc_flush(crtc);
-}
-
-void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-	set_attach(crtc, mdp4_plane_pipe(plane), plane);
-}
-
-void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
-{
-	/* don't actually detatch our primary plane: */
-	if (to_mdp4_crtc(crtc)->plane == plane)
-		return;
-	set_attach(crtc, mdp4_plane_pipe(plane), NULL);
-}
-
 static const char *dma_names[] = {
 		"DMA_P", "DMA_S", "DMA_E",
 };
@@ -757,17 +633,13 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
 {
 	struct drm_crtc *crtc = NULL;
 	struct mdp4_crtc *mdp4_crtc;
-	int ret;
 
 	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
-	if (!mdp4_crtc) {
-		ret = -ENOMEM;
-		goto fail;
-	}
+	if (!mdp4_crtc)
+		return ERR_PTR(-ENOMEM);
 
 	crtc = &mdp4_crtc->base;
 
-	mdp4_crtc->plane = plane;
 	mdp4_crtc->id = id;
 
 	mdp4_crtc->ovlp = ovlp_id;
@@ -784,26 +656,14 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
 
 	spin_lock_init(&mdp4_crtc->cursor.lock);
 
-	ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
-			"unref fb", unref_fb_worker);
-	if (ret)
-		goto fail;
-
-	ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
+	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
 			"unref cursor", unref_cursor_worker);
 
-	INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
-
 	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
 	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
+	plane->crtc = crtc;
 
-	mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
+	mdp4_plane_install_properties(plane, &crtc->base);
 
 	return crtc;
-
-fail:
-	if (crtc)
-		mdp4_crtc_destroy(crtc);
-
-	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 79d804e61cc4..a62109e4ae0d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -228,7 +228,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 	struct drm_encoder *encoder;
 	struct drm_connector *connector;
 	struct drm_panel *panel;
-	struct hdmi *hdmi;
 	int ret;
 
 	/* construct non-private planes: */
@@ -326,11 +325,13 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
 	priv->crtcs[priv->num_crtcs++] = crtc;
 	priv->encoders[priv->num_encoders++] = encoder;
 
-	hdmi = hdmi_init(dev, encoder);
-	if (IS_ERR(hdmi)) {
-		ret = PTR_ERR(hdmi);
-		dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
-		goto fail;
+	if (priv->hdmi) {
+		/* Construct bridge/connector for HDMI: */
+		ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+		if (ret) {
+			dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+			goto fail;
+		}
 	}
 
 	return 0;
@@ -381,6 +382,10 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	if (IS_ERR(mdp4_kms->dsi_pll_vddio))
 		mdp4_kms->dsi_pll_vddio = NULL;
 
+	/* NOTE: driver for this regulator still missing upstream.. use
+	 * _get_exclusive() and ignore the error if it does not exist
+	 * (and hope that the bootloader left it on for us)
+	 */
 	mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
 	if (IS_ERR(mdp4_kms->vdd))
 		mdp4_kms->vdd = NULL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 9ff6e7ccfe90..cbd77bc626d5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -32,13 +32,6 @@ struct mdp4_kms {
 
 	int rev;
 
-	/* Shadow value for MDP4_LAYERMIXER_IN_CFG.. since setup for all
-	 * crtcs/encoders is in one shared register, we need to update it
-	 * via read/modify/write.  But to avoid getting confused by power-
-	 * on-default values after resume, use this shadow value instead:
-	 */
-	uint32_t mixer_cfg;
-
 	/* mapper-id used to request GEM buffer mapped for scanout: */
 	int id;
 
@@ -194,14 +187,6 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
 
 void mdp4_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj);
-void mdp4_plane_set_scanout(struct drm_plane *plane,
-		struct drm_framebuffer *fb);
-int mdp4_plane_mode_set(struct drm_plane *plane,
-		struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		int crtc_x, int crtc_y,
-		unsigned int crtc_w, unsigned int crtc_h,
-		uint32_t src_x, uint32_t src_y,
-		uint32_t src_w, uint32_t src_h);
 enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp4_plane_init(struct drm_device *dev,
 		enum mdp4_pipe pipe_id, bool private_plane);
@@ -210,8 +195,6 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
 void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
 void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
-void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
-void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
 struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, int id, int ovlp_id,
 		enum mdp4_dma dma_id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 310034688c15..4ddc28e1275b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -98,6 +98,9 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
 	.detect = mdp4_lvds_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = mdp4_lvds_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 66f33dba1ebb..1e5ebe83647d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -31,47 +31,26 @@ struct mdp4_plane {
 };
 #define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
 
-static struct mdp4_kms *get_kms(struct drm_plane *plane)
-{
-	struct msm_drm_private *priv = plane->dev->dev_private;
-	return to_mdp4_kms(to_mdp_kms(priv->kms));
-}
-
-static int mdp4_plane_update(struct drm_plane *plane,
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
+		struct drm_framebuffer *fb);
+static int mdp4_plane_mode_set(struct drm_plane *plane,
 		struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		int crtc_x, int crtc_y,
 		unsigned int crtc_w, unsigned int crtc_h,
 		uint32_t src_x, uint32_t src_y,
-		uint32_t src_w, uint32_t src_h)
-{
-	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-
-	mdp4_plane->enabled = true;
-
-	if (plane->fb)
-		drm_framebuffer_unreference(plane->fb);
-
-	drm_framebuffer_reference(fb);
-
-	return mdp4_plane_mode_set(plane, crtc, fb,
-			crtc_x, crtc_y, crtc_w, crtc_h,
-			src_x, src_y, src_w, src_h);
-}
+		uint32_t src_w, uint32_t src_h);
 
-static int mdp4_plane_disable(struct drm_plane *plane)
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
 {
-	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
-	DBG("%s: disable", mdp4_plane->name);
-	if (plane->crtc)
-		mdp4_crtc_detach(plane->crtc, plane);
-	return 0;
+	struct msm_drm_private *priv = plane->dev->dev_private;
+	return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
 static void mdp4_plane_destroy(struct drm_plane *plane)
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 
-	mdp4_plane_disable(plane);
+	drm_plane_helper_disable(plane);
 	drm_plane_cleanup(plane);
 
 	kfree(mdp4_plane);
@@ -92,19 +71,75 @@ int mdp4_plane_set_property(struct drm_plane *plane,
 }
 
 static const struct drm_plane_funcs mdp4_plane_funcs = {
-	.update_plane = mdp4_plane_update,
-	.disable_plane = mdp4_plane_disable,
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
 	.destroy = mdp4_plane_destroy,
 	.set_property = mdp4_plane_set_property,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 };
 
-void mdp4_plane_set_scanout(struct drm_plane *plane,
+static int mdp4_plane_prepare_fb(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+	struct mdp4_kms *mdp4_kms = get_kms(plane);
+
+	DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
+	return msm_framebuffer_prepare(fb, mdp4_kms->id);
+}
+
+static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+	struct mdp4_kms *mdp4_kms = get_kms(plane);
+
+	DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
+	msm_framebuffer_cleanup(fb, mdp4_kms->id);
+}
+
+
+static int mdp4_plane_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	return 0;
+}
+
+static void mdp4_plane_atomic_update(struct drm_plane *plane,
+		struct drm_plane_state *old_state)
+{
+	struct drm_plane_state *state = plane->state;
+	int ret;
+
+	ret = mdp4_plane_mode_set(plane,
+			state->crtc, state->fb,
+			state->crtc_x, state->crtc_y,
+			state->crtc_w, state->crtc_h,
+			state->src_x, state->src_y,
+			state->src_w, state->src_h);
+	/* atomic_check should have ensured that this doesn't fail */
+	WARN_ON(ret < 0);
+}
+
+static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
+		.prepare_fb = mdp4_plane_prepare_fb,
+		.cleanup_fb = mdp4_plane_cleanup_fb,
+		.atomic_check = mdp4_plane_atomic_check,
+		.atomic_update = mdp4_plane_atomic_update,
+};
+
+static void mdp4_plane_set_scanout(struct drm_plane *plane,
 		struct drm_framebuffer *fb)
 {
 	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
 	struct mdp4_kms *mdp4_kms = get_kms(plane);
 	enum mdp4_pipe pipe = mdp4_plane->pipe;
-	uint32_t iova;
+	uint32_t iova = msm_framebuffer_iova(fb, mdp4_kms->id, 0);
+
+	DBG("%s: set_scanout: %08x (%u)", mdp4_plane->name,
+			iova, fb->pitches[0]);
 
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
 			MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
@@ -114,7 +149,6 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
 			MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
 			MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
 
-	msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
 
 	plane->fb = fb;
@@ -122,7 +156,7 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
 
 #define MDP4_VG_PHASE_STEP_DEFAULT	0x20000000
 
-int mdp4_plane_mode_set(struct drm_plane *plane,
+static int mdp4_plane_mode_set(struct drm_plane *plane,
 		struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		int crtc_x, int crtc_y,
 		unsigned int crtc_w, unsigned int crtc_h,
@@ -137,6 +171,11 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
 	uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
 	uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
 
+	if (!(crtc && fb)) {
+		DBG("%s: disabled!", mdp4_plane->name);
+		return 0;
+	}
+
 	/* src values are in Q16 fixed point, convert to integer: */
 	src_x = src_x >> 16;
 	src_y = src_y >> 16;
@@ -197,9 +236,6 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
 	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
 
-	/* TODO detach from old crtc (if we had more than one) */
-	mdp4_crtc_attach(crtc, plane);
-
 	return 0;
 }
 
@@ -239,9 +275,12 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
 			ARRAY_SIZE(mdp4_plane->formats));
 
 	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
-	drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
-			mdp4_plane->formats, mdp4_plane->nformats,
-			type);
+	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
+			mdp4_plane->formats, mdp4_plane->nformats, type);
+	if (ret)
+		goto fail;
+
+	drm_plane_helper_add(plane, &mdp4_plane_helper_funcs);
 
 	mdp4_plane_install_properties(plane, &plane->base);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 67f4f896ba8c..e87ef5512cb0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -10,14 +10,14 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
new file mode 100644
index 000000000000..b0a44310cf2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_cfg.h"
+
+struct mdp5_cfg_handler {
+	int revision;
+	struct mdp5_cfg config;
+};
+
+/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
+const struct mdp5_cfg_hw *mdp5_cfg = NULL;
+
+const struct mdp5_cfg_hw msm8x74_config = {
+	.name = "msm8x74",
+	.smp = {
+		.mmb_count = 22,
+		.mmb_size = 4096,
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+	},
+	.pipe_vig = {
+		.count = 3,
+		.base = { 0x01200, 0x01600, 0x01a00 },
+	},
+	.pipe_rgb = {
+		.count = 3,
+		.base = { 0x01e00, 0x02200, 0x02600 },
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x02a00, 0x02e00 },
+	},
+	.lm = {
+		.count = 5,
+		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+		.nb_stages = 5,
+	},
+	.dspp = {
+		.count = 3,
+		.base = { 0x04600, 0x04a00, 0x04e00 },
+	},
+	.ad = {
+		.count = 2,
+		.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+	},
+	.intf = {
+		.count = 4,
+		.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+	},
+	.max_clk = 200000000,
+};
+
+const struct mdp5_cfg_hw apq8084_config = {
+	.name = "apq8084",
+	.smp = {
+		.mmb_count = 44,
+		.mmb_size = 8192,
+		.reserved_state[0] = GENMASK(7, 0),	/* first 8 MMBs */
+		.reserved[CID_RGB0] = 2,
+		.reserved[CID_RGB1] = 2,
+		.reserved[CID_RGB2] = 2,
+		.reserved[CID_RGB3] = 2,
+	},
+	.ctl = {
+		.count = 5,
+		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+	},
+	.pipe_vig = {
+		.count = 4,
+		.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+	},
+	.pipe_rgb = {
+		.count = 4,
+		.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+	},
+	.pipe_dma = {
+		.count = 2,
+		.base = { 0x03200, 0x03600 },
+	},
+	.lm = {
+		.count = 6,
+		.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+		.nb_stages = 5,
+	},
+	.dspp = {
+		.count = 4,
+		.base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+
+	},
+	.ad = {
+		.count = 3,
+		.base = { 0x13500, 0x13700, 0x13900 },
+	},
+	.intf = {
+		.count = 5,
+		.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+	},
+	.max_clk = 320000000,
+};
+
+static const struct mdp5_cfg_handler cfg_handlers[] = {
+	{ .revision = 0, .config = { .hw = &msm8x74_config } },
+	{ .revision = 2, .config = { .hw = &msm8x74_config } },
+	{ .revision = 3, .config = { .hw = &apq8084_config } },
+};
+
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
+{
+	return cfg_handler->config.hw;
+}
+
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
+{
+	return &cfg_handler->config;
+}
+
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
+{
+	return cfg_handler->revision;
+}
+
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
+{
+	kfree(cfg_handler);
+}
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+		uint32_t major, uint32_t minor)
+{
+	struct drm_device *dev = mdp5_kms->dev;
+	struct platform_device *pdev = dev->platformdev;
+	struct mdp5_cfg_handler *cfg_handler;
+	struct mdp5_cfg_platform *pconfig;
+	int i, ret = 0;
+
+	cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
+	if (unlikely(!cfg_handler)) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (major != 1) {
+		dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+				major, minor);
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	/* only after mdp5_cfg global pointer's init can we access the hw */
+	for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
+		if (cfg_handlers[i].revision != minor)
+			continue;
+		mdp5_cfg = cfg_handlers[i].config.hw;
+
+		break;
+	}
+	if (unlikely(!mdp5_cfg)) {
+		dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+				major, minor);
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	cfg_handler->revision = minor;
+	cfg_handler->config.hw = mdp5_cfg;
+
+	pconfig = mdp5_get_config(pdev);
+	memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig));
+
+	DBG("MDP5: %s hw config selected", mdp5_cfg->name);
+
+	return cfg_handler;
+
+fail:
+	if (cfg_handler)
+		mdp5_cfg_destroy(cfg_handler);
+
+	return NULL;
+}
+
+static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
+{
+	static struct mdp5_cfg_platform config = {};
+#ifdef CONFIG_OF
+	/* TODO */
+#endif
+	config.iommu = iommu_domain_alloc(&platform_bus_type);
+
+	return &config;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
new file mode 100644
index 000000000000..dba4d52cceeb
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDP5_CFG_H__
+#define __MDP5_CFG_H__
+
+#include "msm_drv.h"
+
+/*
+ * mdp5_cfg
+ *
+ * This module configures the dynamic offsets used by mdp5.xml.h
+ * (initialized in mdp5_cfg.c)
+ */
+extern const struct mdp5_cfg_hw *mdp5_cfg;
+
+#define MAX_CTL			8
+#define MAX_BASES		8
+#define MAX_SMP_BLOCKS		44
+#define MAX_CLIENTS		32
+
+typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
+
+#define MDP5_SUB_BLOCK_DEFINITION \
+	int count; \
+	uint32_t base[MAX_BASES]
+
+struct mdp5_sub_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+};
+
+struct mdp5_lm_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+	uint32_t nb_stages;		/* number of stages per blender */
+};
+
+struct mdp5_smp_block {
+	int mmb_count;			/* number of SMP MMBs */
+	int mmb_size;			/* MMB: size in bytes */
+	mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
+	int reserved[MAX_CLIENTS];	/* # of MMBs allocated per client */
+};
+
+struct mdp5_cfg_hw {
+	char *name;
+
+	struct mdp5_smp_block smp;
+	struct mdp5_sub_block ctl;
+	struct mdp5_sub_block pipe_vig;
+	struct mdp5_sub_block pipe_rgb;
+	struct mdp5_sub_block pipe_dma;
+	struct mdp5_lm_block lm;
+	struct mdp5_sub_block dspp;
+	struct mdp5_sub_block ad;
+	struct mdp5_sub_block intf;
+
+	uint32_t max_clk;
+};
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp5_cfg_platform {
+	struct iommu_domain *iommu;
+};
+
+struct mdp5_cfg {
+	const struct mdp5_cfg_hw *hw;
+	struct mdp5_cfg_platform platform;
+};
+
+struct mdp5_kms;
+struct mdp5_cfg_handler;
+
+const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
+struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
+int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
+
+struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
+		uint32_t major, uint32_t minor);
+void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
+
+#endif /* __MDP5_CFG_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index ebe2e60f3ab1..0e9a2e3a82d7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -17,43 +18,35 @@
 
 #include "mdp5_kms.h"
 
+#include <linux/sort.h>
 #include <drm/drm_mode.h>
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 #include "drm_flip_work.h"
 
+#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+
 struct mdp5_crtc {
 	struct drm_crtc base;
 	char name[8];
-	struct drm_plane *plane;
-	struct drm_plane *planes[8];
 	int id;
 	bool enabled;
 
-	/* which mixer/encoder we route output to: */
-	int mixer;
+	/* layer mixer used for this CRTC (+ its lock): */
+#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
+	int lm;
+	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */
+
+	/* CTL used for this CRTC: */
+	struct mdp5_ctl *ctl;
 
 	/* if there is a pending flip, these will be non-null: */
 	struct drm_pending_vblank_event *event;
-	struct msm_fence_cb pageflip_cb;
 
 #define PENDING_CURSOR 0x1
 #define PENDING_FLIP   0x2
 	atomic_t pending;
 
-	/* the fb that we logically (from PoV of KMS API) hold a ref
-	 * to.  Which we may not yet be scanning out (we may still
-	 * be scanning out previous in case of page_flip while waiting
-	 * for gpu rendering to complete:
-	 */
-	struct drm_framebuffer *fb;
-
-	/* the fb that we currently hold a scanout ref to: */
-	struct drm_framebuffer *scanout_fb;
-
-	/* for unref'ing framebuffers after scanout completes: */
-	struct drm_flip_work unref_fb_work;
-
 	struct mdp_irq vblank;
 	struct mdp_irq err;
 };
@@ -73,67 +66,38 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
73 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); 66 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
74} 67}
75 68
76static void crtc_flush(struct drm_crtc *crtc) 69#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
77{
78 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
79 struct mdp5_kms *mdp5_kms = get_kms(crtc);
80 int id = mdp5_crtc->id;
81 uint32_t i, flush = 0;
82
83 for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
84 struct drm_plane *plane = mdp5_crtc->planes[i];
85 if (plane) {
86 enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
87 flush |= pipe2flush(pipe);
88 }
89 }
90 flush |= mixer2flush(mdp5_crtc->id);
91 flush |= MDP5_CTL_FLUSH_CTL;
92
93 DBG("%s: flush=%08x", mdp5_crtc->name, flush);
94
95 mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
96}
97 70
98static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb) 71static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
99{ 72{
100 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 73 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
101 struct drm_framebuffer *old_fb = mdp5_crtc->fb;
102
103 /* grab reference to incoming scanout fb: */
104 drm_framebuffer_reference(new_fb);
105 mdp5_crtc->base.primary->fb = new_fb;
106 mdp5_crtc->fb = new_fb;
107 74
108 if (old_fb) 75 DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
109 drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb); 76 mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
110} 77}
111 78
112/* unlike update_fb(), take a ref to the new scanout fb *before* updating 79/*
113 * plane, then call this. Needed to ensure we don't unref the buffer that 80 * flush updates, to make sure hw is updated to new scanout fb,
114 * is actually still being scanned out. 81 * so that we can safely queue unref to current fb (ie. next
115 * 82 * vblank we know hw is done w/ previous scanout_fb).
116 * Note that this whole thing goes away with atomic.. since we can defer
117 * calling into driver until rendering is done.
118 */ 83 */
119static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) 84static void crtc_flush_all(struct drm_crtc *crtc)
120{ 85{
121 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 86 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
87 struct drm_plane *plane;
88 uint32_t flush_mask = 0;
122 89
123 /* flush updates, to make sure hw is updated to new scanout fb, 90 /* we could have already released CTL in the disable path: */
124 * so that we can safely queue unref to current fb (ie. next 91 if (!mdp5_crtc->ctl)
125 * vblank we know hw is done w/ previous scanout_fb). 92 return;
126 */
127 crtc_flush(crtc);
128
129 if (mdp5_crtc->scanout_fb)
130 drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
131 mdp5_crtc->scanout_fb);
132 93
133 mdp5_crtc->scanout_fb = fb; 94 drm_atomic_crtc_for_each_plane(plane, crtc) {
95 flush_mask |= mdp5_plane_get_flush(plane);
96 }
97 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
98 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
134 99
135 /* enable vblank to complete flip: */ 100 crtc_flush(crtc, flush_mask);
136 request_pending(crtc, PENDING_FLIP);
137} 101}
138 102
139/* if file!=NULL, this is preclose potential cancel-flip path */ 103/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -142,7 +106,8 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
142 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 106 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
143 struct drm_device *dev = crtc->dev; 107 struct drm_device *dev = crtc->dev;
144 struct drm_pending_vblank_event *event; 108 struct drm_pending_vblank_event *event;
145 unsigned long flags, i; 109 struct drm_plane *plane;
110 unsigned long flags;
146 111
147 spin_lock_irqsave(&dev->event_lock, flags); 112 spin_lock_irqsave(&dev->event_lock, flags);
148 event = mdp5_crtc->event; 113 event = mdp5_crtc->event;
@@ -153,50 +118,22 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
153 */ 118 */
154 if (!file || (event->base.file_priv == file)) { 119 if (!file || (event->base.file_priv == file)) {
155 mdp5_crtc->event = NULL; 120 mdp5_crtc->event = NULL;
121 DBG("%s: send event: %p", mdp5_crtc->name, event);
156 drm_send_vblank_event(dev, mdp5_crtc->id, event); 122 drm_send_vblank_event(dev, mdp5_crtc->id, event);
157 } 123 }
158 } 124 }
159 spin_unlock_irqrestore(&dev->event_lock, flags); 125 spin_unlock_irqrestore(&dev->event_lock, flags);
160 126
161 for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) { 127 drm_atomic_crtc_for_each_plane(plane, crtc) {
162 struct drm_plane *plane = mdp5_crtc->planes[i]; 128 mdp5_plane_complete_flip(plane);
163 if (plane)
164 mdp5_plane_complete_flip(plane);
165 } 129 }
166} 130}
167 131
168static void pageflip_cb(struct msm_fence_cb *cb)
169{
170 struct mdp5_crtc *mdp5_crtc =
171 container_of(cb, struct mdp5_crtc, pageflip_cb);
172 struct drm_crtc *crtc = &mdp5_crtc->base;
173 struct drm_framebuffer *fb = mdp5_crtc->fb;
174
175 if (!fb)
176 return;
177
178 drm_framebuffer_reference(fb);
179 mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
180 update_scanout(crtc, fb);
181}
182
183static void unref_fb_worker(struct drm_flip_work *work, void *val)
184{
185 struct mdp5_crtc *mdp5_crtc =
186 container_of(work, struct mdp5_crtc, unref_fb_work);
187 struct drm_device *dev = mdp5_crtc->base.dev;
188
189 mutex_lock(&dev->mode_config.mutex);
190 drm_framebuffer_unreference(val);
191 mutex_unlock(&dev->mode_config.mutex);
192}
193
194static void mdp5_crtc_destroy(struct drm_crtc *crtc) 132static void mdp5_crtc_destroy(struct drm_crtc *crtc)
195{ 133{
196 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 134 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
197 135
198 drm_crtc_cleanup(crtc); 136 drm_crtc_cleanup(crtc);
199 drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
200 137
201 kfree(mdp5_crtc); 138 kfree(mdp5_crtc);
202} 139}
@@ -214,6 +151,8 @@ static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
214 mdp5_enable(mdp5_kms); 151 mdp5_enable(mdp5_kms);
215 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); 152 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
216 } else { 153 } else {
154 /* set STAGE_UNUSED for all layers */
155 mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
217 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); 156 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
218 mdp5_disable(mdp5_kms); 157 mdp5_disable(mdp5_kms);
219 } 158 }
@@ -228,54 +167,78 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
228 return true; 167 return true;
229} 168}
230 169
170/*
171 * blend_setup() - blend all the planes of a CRTC
172 *
173 * When border is enabled, the border color will ALWAYS be the base layer.
174 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
175 * If disabled, the first plane starts at STAGE_BASE.
176 *
177 * Note:
 178 * Border is not enabled here because the private plane exactly matches
 179 * the CRTC resolution.
180 */
231static void blend_setup(struct drm_crtc *crtc) 181static void blend_setup(struct drm_crtc *crtc)
232{ 182{
233 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 183 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
234 struct mdp5_kms *mdp5_kms = get_kms(crtc); 184 struct mdp5_kms *mdp5_kms = get_kms(crtc);
235 int id = mdp5_crtc->id; 185 struct drm_plane *plane;
186 const struct mdp5_cfg_hw *hw_cfg;
187 uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
188 unsigned long flags;
189#define blender(stage) ((stage) - STAGE_BASE)
236 190
237 /* 191 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
238 * Hard-coded setup for now until I figure out how the
239 * layer-mixer works
240 */
241 192
242 /* LM[id]: */ 193 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
243 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id), 194
244 MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA); 195 /* ctl could be released already when we are shutting down: */
245 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0), 196 if (!mdp5_crtc->ctl)
246 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | 197 goto out;
247 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
248 MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
249 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
250 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
251
252 /* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
253 * we want to be setting CTL[m].LAYER[n]. Not sure what the
254 * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
255 * used when chaining up mixers for high resolution displays?
256 */
257 198
258 /* CTL[id]: */ 199 drm_atomic_crtc_for_each_plane(plane, crtc) {
259 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0), 200 enum mdp_mixer_stage_id stage =
260 MDP5_CTL_LAYER_REG_RGB0(STAGE0) | 201 to_mdp5_plane_state(plane->state)->stage;
261 MDP5_CTL_LAYER_REG_BORDER_COLOR); 202
262 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0); 203 /*
263	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);	 204		 * Note: This cannot happen with the current implementation, but
264	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);	 205		 * we need to check this condition once the z property is added.
265 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0); 206 */
207 BUG_ON(stage > hw_cfg->lm.nb_stages);
208
209 /* LM */
210 mdp5_write(mdp5_kms,
211 REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
212 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
213 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
214 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
215 blender(stage)), 0xff);
216 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
217 blender(stage)), 0x00);
218 /* CTL */
219 blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
220 DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
221 pipe2name(mdp5_plane_pipe(plane)), stage);
222 }
223
224 DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
225 mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
226
227out:
228 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
266} 229}
267 230
268static int mdp5_crtc_mode_set(struct drm_crtc *crtc, 231static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
269 struct drm_display_mode *mode,
270 struct drm_display_mode *adjusted_mode,
271 int x, int y,
272 struct drm_framebuffer *old_fb)
273{ 232{
274 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 233 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
275 struct mdp5_kms *mdp5_kms = get_kms(crtc); 234 struct mdp5_kms *mdp5_kms = get_kms(crtc);
276 int ret; 235 unsigned long flags;
236 struct drm_display_mode *mode;
277 237
278 mode = adjusted_mode; 238 if (WARN_ON(!crtc->state))
239 return;
240
241 mode = &crtc->state->adjusted_mode;
279 242
280 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 243 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
281 mdp5_crtc->name, mode->base.id, mode->name, 244 mdp5_crtc->name, mode->base.id, mode->name,
@@ -286,28 +249,11 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
286 mode->vsync_end, mode->vtotal, 249 mode->vsync_end, mode->vtotal,
287 mode->type, mode->flags); 250 mode->type, mode->flags);
288 251
289 /* grab extra ref for update_scanout() */ 252 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
290 drm_framebuffer_reference(crtc->primary->fb); 253 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
291
292 ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
293 0, 0, mode->hdisplay, mode->vdisplay,
294 x << 16, y << 16,
295 mode->hdisplay << 16, mode->vdisplay << 16);
296 if (ret) {
297 drm_framebuffer_unreference(crtc->primary->fb);
298 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
299 mdp5_crtc->name, ret);
300 return ret;
301 }
302
303 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
304 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) | 254 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
305 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); 255 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
306 256 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
307 update_fb(crtc, crtc->primary->fb);
308 update_scanout(crtc, crtc->primary->fb);
309
310 return 0;
311} 257}
312 258
313static void mdp5_crtc_prepare(struct drm_crtc *crtc) 259static void mdp5_crtc_prepare(struct drm_crtc *crtc)
@@ -321,66 +267,119 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc)
321 267
322static void mdp5_crtc_commit(struct drm_crtc *crtc) 268static void mdp5_crtc_commit(struct drm_crtc *crtc)
323{ 269{
270 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
271 DBG("%s", mdp5_crtc->name);
324 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 272 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
325 crtc_flush(crtc); 273 crtc_flush_all(crtc);
326 /* drop the ref to mdp clk's that we got in prepare: */ 274 /* drop the ref to mdp clk's that we got in prepare: */
327 mdp5_disable(get_kms(crtc)); 275 mdp5_disable(get_kms(crtc));
328} 276}
329 277
330static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 278static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
331 struct drm_framebuffer *old_fb) 279{
280}
281
282struct plane_state {
283 struct drm_plane *plane;
284 struct mdp5_plane_state *state;
285};
286
287static int pstate_cmp(const void *a, const void *b)
288{
289 struct plane_state *pa = (struct plane_state *)a;
290 struct plane_state *pb = (struct plane_state *)b;
291 return pa->state->zpos - pb->state->zpos;
292}
293
294static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
295 struct drm_crtc_state *state)
332{ 296{
333 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 297 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
334 struct drm_plane *plane = mdp5_crtc->plane; 298 struct mdp5_kms *mdp5_kms = get_kms(crtc);
335 struct drm_display_mode *mode = &crtc->mode; 299 struct drm_plane *plane;
336 int ret; 300 struct drm_device *dev = crtc->dev;
337 301 struct plane_state pstates[STAGE3 + 1];
338 /* grab extra ref for update_scanout() */ 302 int cnt = 0, i;
339 drm_framebuffer_reference(crtc->primary->fb); 303
340 304 DBG("%s: check", mdp5_crtc->name);
341 ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb, 305
342 0, 0, mode->hdisplay, mode->vdisplay, 306 if (mdp5_crtc->event) {
343 x << 16, y << 16, 307 dev_err(dev->dev, "already pending flip!\n");
344 mode->hdisplay << 16, mode->vdisplay << 16); 308 return -EBUSY;
345 if (ret) {
346 drm_framebuffer_unreference(crtc->primary->fb);
347 return ret;
348 } 309 }
349 310
350 update_fb(crtc, crtc->primary->fb); 311 /* request a free CTL, if none is already allocated for this CRTC */
351 update_scanout(crtc, crtc->primary->fb); 312 if (state->enable && !mdp5_crtc->ctl) {
313 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
314 if (WARN_ON(!mdp5_crtc->ctl))
315 return -EINVAL;
316 }
317
318 /* verify that there are not too many planes attached to crtc
319 * and that we don't have conflicting mixer stages:
320 */
321 drm_atomic_crtc_state_for_each_plane(plane, state) {
322 struct drm_plane_state *pstate;
323
324 if (cnt >= ARRAY_SIZE(pstates)) {
325 dev_err(dev->dev, "too many planes!\n");
326 return -EINVAL;
327 }
328
329 pstate = state->state->plane_states[drm_plane_index(plane)];
330
331 /* plane might not have changed, in which case take
332 * current state:
333 */
334 if (!pstate)
335 pstate = plane->state;
336
337 pstates[cnt].plane = plane;
338 pstates[cnt].state = to_mdp5_plane_state(pstate);
339
340 cnt++;
341 }
342
343 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
344
345 for (i = 0; i < cnt; i++) {
346 pstates[i].state->stage = STAGE_BASE + i;
347 DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
348 pipe2name(mdp5_plane_pipe(pstates[i].plane)),
349 pstates[i].state->stage);
350 }
352 351
353 return 0; 352 return 0;
354} 353}
355 354
356static void mdp5_crtc_load_lut(struct drm_crtc *crtc) 355static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
357{ 356{
357 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
358 DBG("%s: begin", mdp5_crtc->name);
358} 359}
359 360
360static int mdp5_crtc_page_flip(struct drm_crtc *crtc, 361static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
361 struct drm_framebuffer *new_fb,
362 struct drm_pending_vblank_event *event,
363 uint32_t page_flip_flags)
364{ 362{
365 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 363 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
366 struct drm_device *dev = crtc->dev; 364 struct drm_device *dev = crtc->dev;
367 struct drm_gem_object *obj;
368 unsigned long flags; 365 unsigned long flags;
369 366
370 if (mdp5_crtc->event) { 367 DBG("%s: flush", mdp5_crtc->name);
371 dev_err(dev->dev, "already pending flip!\n");
372 return -EBUSY;
373 }
374 368
375 obj = msm_framebuffer_bo(new_fb, 0); 369 WARN_ON(mdp5_crtc->event);
376 370
377 spin_lock_irqsave(&dev->event_lock, flags); 371 spin_lock_irqsave(&dev->event_lock, flags);
378 mdp5_crtc->event = event; 372 mdp5_crtc->event = crtc->state->event;
379 spin_unlock_irqrestore(&dev->event_lock, flags); 373 spin_unlock_irqrestore(&dev->event_lock, flags);
380 374
381 update_fb(crtc, new_fb); 375 blend_setup(crtc);
376 crtc_flush_all(crtc);
377 request_pending(crtc, PENDING_FLIP);
382 378
383 return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb); 379 if (mdp5_crtc->ctl && !crtc->state->enable) {
380 mdp5_ctl_release(mdp5_crtc->ctl);
381 mdp5_crtc->ctl = NULL;
382 }
384} 383}
385 384
386static int mdp5_crtc_set_property(struct drm_crtc *crtc, 385static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -391,27 +390,33 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
391} 390}
392 391
393static const struct drm_crtc_funcs mdp5_crtc_funcs = { 392static const struct drm_crtc_funcs mdp5_crtc_funcs = {
394 .set_config = drm_crtc_helper_set_config, 393 .set_config = drm_atomic_helper_set_config,
395 .destroy = mdp5_crtc_destroy, 394 .destroy = mdp5_crtc_destroy,
396 .page_flip = mdp5_crtc_page_flip, 395 .page_flip = drm_atomic_helper_page_flip,
397 .set_property = mdp5_crtc_set_property, 396 .set_property = mdp5_crtc_set_property,
397 .reset = drm_atomic_helper_crtc_reset,
398 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
399 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
398}; 400};
399 401
400static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { 402static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
401 .dpms = mdp5_crtc_dpms, 403 .dpms = mdp5_crtc_dpms,
402 .mode_fixup = mdp5_crtc_mode_fixup, 404 .mode_fixup = mdp5_crtc_mode_fixup,
403 .mode_set = mdp5_crtc_mode_set, 405 .mode_set_nofb = mdp5_crtc_mode_set_nofb,
406 .mode_set = drm_helper_crtc_mode_set,
407 .mode_set_base = drm_helper_crtc_mode_set_base,
404 .prepare = mdp5_crtc_prepare, 408 .prepare = mdp5_crtc_prepare,
405 .commit = mdp5_crtc_commit, 409 .commit = mdp5_crtc_commit,
406 .mode_set_base = mdp5_crtc_mode_set_base,
407 .load_lut = mdp5_crtc_load_lut, 410 .load_lut = mdp5_crtc_load_lut,
411 .atomic_check = mdp5_crtc_atomic_check,
412 .atomic_begin = mdp5_crtc_atomic_begin,
413 .atomic_flush = mdp5_crtc_atomic_flush,
408}; 414};
409 415
410static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) 416static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
411{ 417{
412 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank); 418 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
413 struct drm_crtc *crtc = &mdp5_crtc->base; 419 struct drm_crtc *crtc = &mdp5_crtc->base;
414 struct msm_drm_private *priv = crtc->dev->dev_private;
415 unsigned pending; 420 unsigned pending;
416 421
417 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank); 422 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
@@ -420,16 +425,14 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
420 425
421 if (pending & PENDING_FLIP) { 426 if (pending & PENDING_FLIP) {
422 complete_flip(crtc, NULL); 427 complete_flip(crtc, NULL);
423 drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
424 } 428 }
425} 429}
426 430
427static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) 431static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
428{ 432{
429 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err); 433 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
430 struct drm_crtc *crtc = &mdp5_crtc->base; 434
431 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus); 435 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
432 crtc_flush(crtc);
433} 436}
434 437
435uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) 438uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
@@ -450,10 +453,9 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
450{ 453{
451 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 454 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
452 struct mdp5_kms *mdp5_kms = get_kms(crtc); 455 struct mdp5_kms *mdp5_kms = get_kms(crtc);
453 static const enum mdp5_intfnum intfnum[] = { 456 uint32_t flush_mask = 0;
454 INTF0, INTF1, INTF2, INTF3,
455 };
456 uint32_t intf_sel; 457 uint32_t intf_sel;
458 unsigned long flags;
457 459
458 /* now that we know what irq's we want: */ 460 /* now that we know what irq's we want: */
459 mdp5_crtc->err.irqmask = intf2err(intf); 461 mdp5_crtc->err.irqmask = intf2err(intf);
@@ -463,6 +465,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
463 if (!mdp5_kms) 465 if (!mdp5_kms)
464 return; 466 return;
465 467
468 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
466 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); 469 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
467 470
468 switch (intf) { 471 switch (intf) {
@@ -487,45 +490,25 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
487 break; 490 break;
488 } 491 }
489 492
490 blend_setup(crtc); 493 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
494 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
491 495
492 DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel); 496 DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
497 mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
498 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
499 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
493 500
494 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel); 501 crtc_flush(crtc, flush_mask);
495 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
496 MDP5_CTL_OP_MODE(MODE_NONE) |
497 MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
498
499 crtc_flush(crtc);
500} 502}
501 503
502static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id, 504int mdp5_crtc_get_lm(struct drm_crtc *crtc)
503 struct drm_plane *plane)
504{ 505{
505 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 506 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
506 507
507 BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes)); 508 if (WARN_ON(!crtc))
509 return -EINVAL;
508 510
509 if (mdp5_crtc->planes[pipe_id] == plane) 511 return mdp5_crtc->lm;
510 return;
511
512 mdp5_crtc->planes[pipe_id] = plane;
513 blend_setup(crtc);
514 if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
515 crtc_flush(crtc);
516}
517
518void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
519{
520 set_attach(crtc, mdp5_plane_pipe(plane), plane);
521}
522
523void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
524{
525 /* don't actually detatch our primary plane: */
526 if (to_mdp5_crtc(crtc)->plane == plane)
527 return;
528 set_attach(crtc, mdp5_plane_pipe(plane), NULL);
529} 512}
530 513
531/* initialize crtc */ 514/* initialize crtc */
@@ -534,18 +517,17 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
534{ 517{
535 struct drm_crtc *crtc = NULL; 518 struct drm_crtc *crtc = NULL;
536 struct mdp5_crtc *mdp5_crtc; 519 struct mdp5_crtc *mdp5_crtc;
537 int ret;
538 520
539 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL); 521 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
540 if (!mdp5_crtc) { 522 if (!mdp5_crtc)
541 ret = -ENOMEM; 523 return ERR_PTR(-ENOMEM);
542 goto fail;
543 }
544 524
545 crtc = &mdp5_crtc->base; 525 crtc = &mdp5_crtc->base;
546 526
547 mdp5_crtc->plane = plane;
548 mdp5_crtc->id = id; 527 mdp5_crtc->id = id;
528 mdp5_crtc->lm = GET_LM_ID(id);
529
530 spin_lock_init(&mdp5_crtc->lm_lock);
549 531
550 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; 532 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
551 mdp5_crtc->err.irq = mdp5_crtc_err_irq; 533 mdp5_crtc->err.irq = mdp5_crtc_err_irq;
@@ -553,23 +535,11 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
553 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d", 535 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
554 pipe2name(mdp5_plane_pipe(plane)), id); 536 pipe2name(mdp5_plane_pipe(plane)), id);
555 537
556 ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
557 "unref fb", unref_fb_worker);
558 if (ret)
559 goto fail;
560
561 INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
562
563 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs); 538 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
564 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); 539 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
540 plane->crtc = crtc;
565 541
566 mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base); 542 mdp5_plane_install_properties(plane, &crtc->base);
567 543
568 return crtc; 544 return crtc;
569
570fail:
571 if (crtc)
572 mdp5_crtc_destroy(crtc);
573
574 return ERR_PTR(ret);
575} 545}
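
The new atomic_check hook above derives each plane's mixer stage by sorting plane states on zpos. A reduced sketch of that pattern, using sort() from <linux/sort.h> as included at the top of the file; the example_* names and the standalone struct are illustrative, with STAGE_BASE assumed to come from the mdp headers:

#include <linux/sort.h>

/* illustrative reduction of the zpos -> stage assignment, not part of this patch */
struct example_pstate {
	int zpos;	/* requested z-order */
	int stage;	/* resulting mixer stage */
};

static int example_zpos_cmp(const void *a, const void *b)
{
	const struct example_pstate *pa = a, *pb = b;

	return pa->zpos - pb->zpos;
}

static void example_assign_stages(struct example_pstate *pstates, int cnt)
{
	int i;

	sort(pstates, cnt, sizeof(pstates[0]), example_zpos_cmp, NULL);

	/* lowest zpos gets STAGE_BASE, each following plane the next stage: */
	for (i = 0; i < cnt; i++)
		pstates[i].stage = STAGE_BASE + i;
}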
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
new file mode 100644
index 000000000000..dea4505ac963
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -0,0 +1,322 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "mdp5_kms.h"
15#include "mdp5_ctl.h"
16
17/*
18 * CTL - MDP Control Pool Manager
19 *
20 * Controls are shared between all CRTCs.
21 *
22 * They are intended to be used for data path configuration.
23 * The top level register programming describes the complete data path for
24 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
25 *
 26 * Hardware capabilities determine the number of concurrent data paths.
 27 *
 28 * In certain use cases (high-resolution dual pipe), a single CTL can be
 29 * shared across multiple CRTCs.
 30 *
 31 * Because the number of CTLs can be less than the number of CRTCs,
 32 * CTLs are dynamically allocated from a pool of CTLs: one is handed out
 33 * only when a CRTC requests it (from mdp5_crtc_atomic_check()).
34 */
35
36struct mdp5_ctl {
37 struct mdp5_ctl_manager *ctlm;
38
39 u32 id;
40
41 /* whether this CTL has been allocated or not: */
42 bool busy;
43
44 /* memory output connection (@see mdp5_ctl_mode): */
45 u32 mode;
46
47 /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
48 spinlock_t hw_lock;
49 u32 reg_offset;
50
51 /* flush mask used to commit CTL registers */
52 u32 flush_mask;
53
54 bool cursor_on;
55
56 struct drm_crtc *crtc;
57};
58
59struct mdp5_ctl_manager {
60 struct drm_device *dev;
61
62 /* number of CTL / Layer Mixers in this hw config: */
63 u32 nlm;
64 u32 nctl;
65
66 /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
67 spinlock_t pool_lock;
68 struct mdp5_ctl ctls[MAX_CTL];
69};
70
71static inline
72struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
73{
74 struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
75
76 return to_mdp5_kms(to_mdp_kms(priv->kms));
77}
78
79static inline
80void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
81{
82 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
83
84 (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
85 mdp5_write(mdp5_kms, reg, data);
86}
87
88static inline
89u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
90{
91 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
92
 93	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
94 return mdp5_read(mdp5_kms, reg);
95}
96
97
98int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
99{
100 unsigned long flags;
101 static const enum mdp5_intfnum intfnum[] = {
102 INTF0, INTF1, INTF2, INTF3,
103 };
104
105 spin_lock_irqsave(&ctl->hw_lock, flags);
106 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
107 MDP5_CTL_OP_MODE(ctl->mode) |
108 MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
109 spin_unlock_irqrestore(&ctl->hw_lock, flags);
110
111 return 0;
112}
113
114int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
115{
116 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
117 unsigned long flags;
118 u32 blend_cfg;
119 int lm;
120
121 lm = mdp5_crtc_get_lm(ctl->crtc);
122 if (unlikely(WARN_ON(lm < 0))) {
123		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
124 ctl->id, lm);
125 return -EINVAL;
126 }
127
128 spin_lock_irqsave(&ctl->hw_lock, flags);
129
130 blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
131
132 if (enable)
133 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
134 else
135 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
136
137 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
138
139 spin_unlock_irqrestore(&ctl->hw_lock, flags);
140
141 ctl->cursor_on = enable;
142
143 return 0;
144}
145
146
147int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
148{
149 unsigned long flags;
150
151 if (ctl->cursor_on)
152 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
153 else
154 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
155
156 spin_lock_irqsave(&ctl->hw_lock, flags);
157 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
158 spin_unlock_irqrestore(&ctl->hw_lock, flags);
159
160 return 0;
161}
162
163int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
164{
165 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
166 unsigned long flags;
167
168 if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
169 int lm = mdp5_crtc_get_lm(ctl->crtc);
170
171 if (unlikely(WARN_ON(lm < 0))) {
172			dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
173 ctl->id, lm);
174 return -EINVAL;
175 }
176
177 /* for current targets, cursor bit is the same as LM bit */
178 flush_mask |= mdp_ctl_flush_mask_lm(lm);
179 }
180
181 spin_lock_irqsave(&ctl->hw_lock, flags);
182 ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
183 spin_unlock_irqrestore(&ctl->hw_lock, flags);
184
185 return 0;
186}
187
188u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
189{
190 return ctl->flush_mask;
191}
192
193void mdp5_ctl_release(struct mdp5_ctl *ctl)
194{
195 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
196 unsigned long flags;
197
198 if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
199		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)\n",
200 ctl->id, ctl->busy);
201 return;
202 }
203
204 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
205 ctl->busy = false;
206 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
207
208 DBG("CTL %d released", ctl->id);
209}
210
211/*
212 * mdp5_ctlm_request() - CTL dynamic allocation
213 *
214 * Note: the current implementation assumes that there is only one CRTC per CTL
215 *
216 * @return first free CTL
217 */
218struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
219 struct drm_crtc *crtc)
220{
221 struct mdp5_ctl *ctl = NULL;
222 unsigned long flags;
223 int c;
224
225 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
226
227 for (c = 0; c < ctl_mgr->nctl; c++)
228 if (!ctl_mgr->ctls[c].busy)
229 break;
230
231 if (unlikely(c >= ctl_mgr->nctl)) {
232		dev_err(ctl_mgr->dev->dev, "No more CTL available!\n");
233 goto unlock;
234 }
235
236 ctl = &ctl_mgr->ctls[c];
237
238 ctl->crtc = crtc;
239 ctl->busy = true;
240 DBG("CTL %d allocated", ctl->id);
241
242unlock:
243 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
244 return ctl;
245}
246
247void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
248{
249 unsigned long flags;
250 int c;
251
252 for (c = 0; c < ctl_mgr->nctl; c++) {
253 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
254
255 spin_lock_irqsave(&ctl->hw_lock, flags);
256 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
257 spin_unlock_irqrestore(&ctl->hw_lock, flags);
258 }
259}
260
261void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
262{
263 kfree(ctl_mgr);
264}
265
266struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
267 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
268{
269 struct mdp5_ctl_manager *ctl_mgr;
270 const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
271 unsigned long flags;
272 int c, ret;
273
274 ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
275 if (!ctl_mgr) {
276 dev_err(dev->dev, "failed to allocate CTL manager\n");
277 ret = -ENOMEM;
278 goto fail;
279 }
280
281 if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
282 dev_err(dev->dev, "Increase static pool size to at least %d\n",
283 ctl_cfg->count);
284 ret = -ENOSPC;
285 goto fail;
286 }
287
288 /* initialize the CTL manager: */
289 ctl_mgr->dev = dev;
290 ctl_mgr->nlm = hw_cfg->lm.count;
291 ctl_mgr->nctl = ctl_cfg->count;
292 spin_lock_init(&ctl_mgr->pool_lock);
293
294 /* initialize each CTL of the pool: */
295 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
296 for (c = 0; c < ctl_mgr->nctl; c++) {
297 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
298
299 if (WARN_ON(!ctl_cfg->base[c])) {
300 dev_err(dev->dev, "CTL_%d: base is null!\n", c);
301 ret = -EINVAL;
302 goto fail;
303 }
304 ctl->ctlm = ctl_mgr;
305 ctl->id = c;
306 ctl->mode = MODE_NONE;
307 ctl->reg_offset = ctl_cfg->base[c];
308 ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
309 ctl->busy = false;
310 spin_lock_init(&ctl->hw_lock);
311 }
312 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
313 DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
314
315 return ctl_mgr;
316
317fail:
318 if (ctl_mgr)
319 mdp5_ctlm_destroy(ctl_mgr);
320
321 return ERR_PTR(ret);
322}
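
Putting the pool manager above together, the expected CTL lifetime for a single CRTC looks roughly as follows. This is a hedged sketch using only calls declared in mdp5_ctl.h, with error handling abbreviated; example_ctl_lifecycle() is purely illustrative:

/* illustrative sketch only -- not part of this patch */
static int example_ctl_lifecycle(struct mdp5_ctl_manager *ctlm,
		struct drm_crtc *crtc, enum mdp5_intf intf,
		u32 lm, u32 blend_cfg)
{
	struct mdp5_ctl *ctl;

	/* grab a free CTL from the pool (typically from atomic_check): */
	ctl = mdp5_ctlm_request(ctlm, crtc);
	if (!ctl)
		return -EBUSY;

	mdp5_ctl_set_intf(ctl, intf);	/* route the data path to an interface */
	mdp5_ctl_blend(ctl, lm, blend_cfg);	/* program CTL_LAYER for the mixer */

	/* commit at least the CTL's own flush bit (callers usually OR in more): */
	mdp5_ctl_commit(ctl, mdp5_ctl_get_flush(ctl));

	/* once the CRTC is disabled, hand the CTL back to the pool: */
	mdp5_ctl_release(ctl);

	return 0;
}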
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
new file mode 100644
index 000000000000..1018519b6af2
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -0,0 +1,122 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __MDP5_CTL_H__
15#define __MDP5_CTL_H__
16
17#include "msm_drv.h"
18
19/*
20 * CTL Manager prototypes:
 21 * mdp5_ctlm_init() returns a ctlm (CTL Manager) handle,
22 * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
23 */
24struct mdp5_ctl_manager;
25struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
26 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
27void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
28void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
29
30/*
31 * CTL prototypes:
 32 * mdp5_ctlm_request(ctlm, ...) returns a ctl (CTL resource) handle,
33 * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
34 */
35struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
36
37int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
38
39int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
40
41/* @blend_cfg: see LM blender config definition below */
42int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
43
44/* @flush_mask: see CTL flush masks definitions below */
45int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
46u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
47
48void mdp5_ctl_release(struct mdp5_ctl *ctl);
49
50/*
51 * blend_cfg (LM blender config):
52 *
53 * The function below allows the caller of mdp5_ctl_blend() to specify how pipes
 54 * are blended according to their stage (z-order), through the @blend_cfg arg.
55 */
56static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
57 enum mdp_mixer_stage_id stage)
58{
59 switch (pipe) {
60 case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
61 case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
62 case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
63 case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
64 case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
65 case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
66 case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
67 case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
68 case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
69 case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
70 default: return 0;
71 }
72}
73
74/*
75 * flush_mask (CTL flush masks):
76 *
77 * The following functions allow each DRM entity to get and store
 78 * its own flush mask.
 79 * Once stored, these masks are then read back through each entity's
 80 * interface and used by the caller of mdp5_ctl_commit() to specify
 81 * which block(s) need to be flushed through the @flush_mask parameter.
82 */
83
84#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000
85
86static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
87{
88 /* TODO: use id once multiple cursor support is present */
89 (void)cursor_id;
90
91 return MDP5_CTL_FLUSH_CURSOR_DUMMY;
92}
93
94static inline u32 mdp_ctl_flush_mask_lm(int lm)
95{
96 switch (lm) {
97 case 0: return MDP5_CTL_FLUSH_LM0;
98 case 1: return MDP5_CTL_FLUSH_LM1;
99 case 2: return MDP5_CTL_FLUSH_LM2;
100 case 5: return MDP5_CTL_FLUSH_LM5;
101 default: return 0;
102 }
103}
104
105static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
106{
107 switch (pipe) {
108 case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
109 case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
110 case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
111 case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
112 case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
113 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
114 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
115 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
116 case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
117 case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
118 default: return 0;
119 }
120}
121
122#endif /* __MDP5_CTL_H__ */
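
A short sketch of how the inline helpers above are meant to compose a blend configuration and a matching flush mask; the pipe and stage choices are illustrative, assuming SSPP_* and STAGE_* come from the generated mdp5/mdp headers:

/* illustrative sketch only -- not part of this patch */
static void example_blend_and_flush(struct mdp5_ctl *ctl, int lm)
{
	u32 blend_cfg = 0, flush_mask = 0;

	/* stage two pipes on this layer mixer: */
	blend_cfg |= mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE);
	blend_cfg |= mdp_ctl_blend_mask(SSPP_VIG0, STAGE0);
	mdp5_ctl_blend(ctl, lm, blend_cfg);

	/* flush the pipes, the mixer, and the CTL itself in one commit: */
	flush_mask |= mdp_ctl_flush_mask_pipe(SSPP_RGB0);
	flush_mask |= mdp_ctl_flush_mask_pipe(SSPP_VIG0);
	flush_mask |= mdp_ctl_flush_mask_lm(lm);
	flush_mask |= mdp5_ctl_get_flush(ctl);
	mdp5_ctl_commit(ctl, flush_mask);
}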
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index edec7bfaa952..0254bfdeb92f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -24,6 +24,7 @@ struct mdp5_encoder {
24 struct drm_encoder base; 24 struct drm_encoder base;
25 int intf; 25 int intf;
26 enum mdp5_intf intf_id; 26 enum mdp5_intf intf_id;
27 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
27 bool enabled; 28 bool enabled;
28 uint32_t bsc; 29 uint32_t bsc;
29}; 30};
@@ -115,6 +116,7 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
115 struct mdp5_kms *mdp5_kms = get_kms(encoder); 116 struct mdp5_kms *mdp5_kms = get_kms(encoder);
116 int intf = mdp5_encoder->intf; 117 int intf = mdp5_encoder->intf;
117 bool enabled = (mode == DRM_MODE_DPMS_ON); 118 bool enabled = (mode == DRM_MODE_DPMS_ON);
119 unsigned long flags;
118 120
119 DBG("mode=%d", mode); 121 DBG("mode=%d", mode);
120 122
@@ -123,9 +125,24 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
123 125
124 if (enabled) { 126 if (enabled) {
125 bs_set(mdp5_encoder, 1); 127 bs_set(mdp5_encoder, 1);
128 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
126 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); 129 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
130 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
127 } else { 131 } else {
132 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
128 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); 133 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
134 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
135
136 /*
 137		 * Wait for a vsync so we know that ENABLE=0 has latched before
 138		 * the (connector) source of the vsyncs gets disabled;
 139		 * otherwise we end up in a funny state if we re-enable
 140		 * before the disable latches, with the result that some of
 141		 * the setting changes for the new modeset (like the new
 142		 * scanout buffer) don't latch properly.
143 */
144 mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
145
129 bs_set(mdp5_encoder, 0); 146 bs_set(mdp5_encoder, 0);
130 } 147 }
131 148
@@ -150,6 +167,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
150 uint32_t display_v_start, display_v_end; 167 uint32_t display_v_start, display_v_end;
151 uint32_t hsync_start_x, hsync_end_x; 168 uint32_t hsync_start_x, hsync_end_x;
152 uint32_t format; 169 uint32_t format;
170 unsigned long flags;
153 171
154 mode = adjusted_mode; 172 mode = adjusted_mode;
155 173
@@ -180,6 +198,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
180 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; 198 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
181 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; 199 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
182 200
201 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
202
183 mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf), 203 mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
184 MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) | 204 MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
185 MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal)); 205 MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
@@ -201,6 +221,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
201 mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0); 221 mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
202 mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format); 222 mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
203 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */ 223 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
224
225 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
204} 226}
205 227
206static void mdp5_encoder_prepare(struct drm_encoder *encoder) 228static void mdp5_encoder_prepare(struct drm_encoder *encoder)
@@ -242,6 +264,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
242 mdp5_encoder->intf_id = intf_id; 264 mdp5_encoder->intf_id = intf_id;
243 encoder = &mdp5_encoder->base; 265 encoder = &mdp5_encoder->base;
244 266
267 spin_lock_init(&mdp5_encoder->intf_lock);
268
245 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, 269 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
246 DRM_MODE_ENCODER_TMDS); 270 DRM_MODE_ENCODER_TMDS);
247 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); 271 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
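
A condensed sketch of the locking discipline the encoder now applies to the INTF registers, folding the enable and disable paths above into one helper; example_intf_enable() is illustrative, assuming it lives in this file where struct mdp5_encoder is visible:

/* illustrative sketch only -- not part of this patch */
static void example_intf_enable(struct mdp5_encoder *mdp5_encoder,
		struct mdp5_kms *mdp5_kms, int intf, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf),
			enable ? 1 : 0);
	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);

	/* on disable, let ENABLE=0 latch at the next vblank first: */
	if (!enable)
		mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
}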
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index f2b985bc2adf..70ac81edd40f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -15,6 +15,8 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/irqdomain.h>
19#include <linux/irq.h>
18 20
19#include "msm_drv.h" 21#include "msm_drv.h"
20#include "mdp5_kms.h" 22#include "mdp5_kms.h"
@@ -88,11 +90,17 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
88 90
89 VERB("intr=%08x", intr); 91 VERB("intr=%08x", intr);
90 92
91 if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) 93 if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
92 mdp5_irq_mdp(mdp_kms); 94 mdp5_irq_mdp(mdp_kms);
95 intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
96 }
93 97
94 if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI) 98 while (intr) {
95 hdmi_irq(0, mdp5_kms->hdmi); 99 irq_hw_number_t hwirq = fls(intr) - 1;
100 generic_handle_irq(irq_find_mapping(
101 mdp5_kms->irqcontroller.domain, hwirq));
102 intr &= ~(1 << hwirq);
103 }
96 104
97 return IRQ_HANDLED; 105 return IRQ_HANDLED;
98} 106}
@@ -109,3 +117,82 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
109 mdp_update_vblank_mask(to_mdp_kms(kms), 117 mdp_update_vblank_mask(to_mdp_kms(kms),
110 mdp5_crtc_vblank(crtc), false); 118 mdp5_crtc_vblank(crtc), false);
111} 119}
120
121/*
122 * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
123 * can register to get their irqs delivered
124 */
125
126#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
127 MDP5_HW_INTR_STATUS_INTR_DSI1 | \
128 MDP5_HW_INTR_STATUS_INTR_HDMI | \
129 MDP5_HW_INTR_STATUS_INTR_EDP)
130
131static void mdp5_hw_mask_irq(struct irq_data *irqd)
132{
133 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
134 smp_mb__before_atomic();
135 clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
136 smp_mb__after_atomic();
137}
138
139static void mdp5_hw_unmask_irq(struct irq_data *irqd)
140{
141 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
142 smp_mb__before_atomic();
143 set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
144 smp_mb__after_atomic();
145}
146
147static struct irq_chip mdp5_hw_irq_chip = {
148 .name = "mdp5",
149 .irq_mask = mdp5_hw_mask_irq,
150 .irq_unmask = mdp5_hw_unmask_irq,
151};
152
153static int mdp5_hw_irqdomain_map(struct irq_domain *d,
154 unsigned int irq, irq_hw_number_t hwirq)
155{
156 struct mdp5_kms *mdp5_kms = d->host_data;
157
158 if (!(VALID_IRQS & (1 << hwirq)))
159 return -EPERM;
160
161 irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
162 irq_set_chip_data(irq, mdp5_kms);
163 set_irq_flags(irq, IRQF_VALID);
164
165 return 0;
166}
167
168static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
169 .map = mdp5_hw_irqdomain_map,
170 .xlate = irq_domain_xlate_onecell,
171};
172
173
174int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
175{
176 struct device *dev = mdp5_kms->dev->dev;
177 struct irq_domain *d;
178
179 d = irq_domain_add_linear(dev->of_node, 32,
180 &mdp5_hw_irqdomain_ops, mdp5_kms);
181 if (!d) {
182 dev_err(dev, "mdp5 irq domain add failed\n");
183 return -ENXIO;
184 }
185
186 mdp5_kms->irqcontroller.enabled_mask = 0;
187 mdp5_kms->irqcontroller.domain = d;
188
189 return 0;
190}
191
192void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
193{
194 if (mdp5_kms->irqcontroller.domain) {
195 irq_domain_remove(mdp5_kms->irqcontroller.domain);
196 mdp5_kms->irqcontroller.domain = NULL;
197 }
198}
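
A hedged sketch of how a sub-block driver could turn one of the VALID_IRQS bits into a Linux irq number via the domain registered above; the hwirq argument and the example_get_subblock_irq() name are illustrative:

/* illustrative sketch only -- not part of this patch */
static int example_get_subblock_irq(struct mdp5_kms *mdp5_kms,
		irq_hw_number_t hwirq)
{
	unsigned int irq;

	/* maps hwirq -> virq, creating the mapping if needed: */
	irq = irq_create_mapping(mdp5_kms->irqcontroller.domain, hwirq);
	if (!irq)
		return -EINVAL;

	/* the returned number can then be passed to request_irq() etc. */
	return irq;
}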
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 31a2c6331a1d..a11f1b80c488 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -24,145 +25,11 @@ static const char *iommu_ports[] = {
24 "mdp_0", 25 "mdp_0",
25}; 26};
26 27
27static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
28
29const struct mdp5_config *mdp5_cfg;
30
31static const struct mdp5_config msm8x74_config = {
32 .name = "msm8x74",
33 .ctl = {
34 .count = 5,
35 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
36 },
37 .pipe_vig = {
38 .count = 3,
39 .base = { 0x01200, 0x01600, 0x01a00 },
40 },
41 .pipe_rgb = {
42 .count = 3,
43 .base = { 0x01e00, 0x02200, 0x02600 },
44 },
45 .pipe_dma = {
46 .count = 2,
47 .base = { 0x02a00, 0x02e00 },
48 },
49 .lm = {
50 .count = 5,
51 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
52 },
53 .dspp = {
54 .count = 3,
55 .base = { 0x04600, 0x04a00, 0x04e00 },
56 },
57 .ad = {
58 .count = 2,
59 .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
60 },
61 .intf = {
62 .count = 4,
63 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
64 },
65};
66
67static const struct mdp5_config apq8084_config = {
68 .name = "apq8084",
69 .ctl = {
70 .count = 5,
71 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
72 },
73 .pipe_vig = {
74 .count = 4,
75 .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
76 },
77 .pipe_rgb = {
78 .count = 4,
79 .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
80 },
81 .pipe_dma = {
82 .count = 2,
83 .base = { 0x03200, 0x03600 },
84 },
85 .lm = {
86 .count = 6,
87 .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
88 },
89 .dspp = {
90 .count = 4,
91 .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
92
93 },
94 .ad = {
95 .count = 3,
96 .base = { 0x13500, 0x13700, 0x13900 },
97 },
98 .intf = {
99 .count = 5,
100 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
101 },
102};
103
104struct mdp5_config_entry {
105 int revision;
106 const struct mdp5_config *config;
107};
108
109static const struct mdp5_config_entry mdp5_configs[] = {
110 { .revision = 0, .config = &msm8x74_config },
111 { .revision = 2, .config = &msm8x74_config },
112 { .revision = 3, .config = &apq8084_config },
113};
114
115static int mdp5_select_hw_cfg(struct msm_kms *kms)
116{
117 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
118 struct drm_device *dev = mdp5_kms->dev;
119 uint32_t version, major, minor;
120 int i, ret = 0;
121
122 mdp5_enable(mdp5_kms);
123 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
124 mdp5_disable(mdp5_kms);
125
126 major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
127 minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
128
129 DBG("found MDP5 version v%d.%d", major, minor);
130
131 if (major != 1) {
132 dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
133 major, minor);
134 ret = -ENXIO;
135 goto out;
136 }
137
138 mdp5_kms->rev = minor;
139
140 /* only after mdp5_cfg global pointer's init can we access the hw */
141 for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
142 if (mdp5_configs[i].revision != minor)
143 continue;
144 mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
145 break;
146 }
147 if (unlikely(!mdp5_kms->hw_cfg)) {
148 dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
149 major, minor);
150 ret = -ENXIO;
151 goto out;
152 }
153
154 DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
155
156 return 0;
157out:
158 return ret;
159}
160
161static int mdp5_hw_init(struct msm_kms *kms) 28static int mdp5_hw_init(struct msm_kms *kms)
162{ 29{
163 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 30 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
164 struct drm_device *dev = mdp5_kms->dev; 31 struct drm_device *dev = mdp5_kms->dev;
165 int i; 32 unsigned long flags;
166 33
167 pm_runtime_get_sync(dev->dev); 34 pm_runtime_get_sync(dev->dev);
168 35
@@ -190,10 +57,11 @@ static int mdp5_hw_init(struct msm_kms *kms)
190 * care. 57 * care.
191 */ 58 */
192 59
60 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
193 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); 61 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
62 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
194 63
195 for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++) 64 mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
196 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
197 65
198 pm_runtime_put_sync(dev->dev); 66 pm_runtime_put_sync(dev->dev);
199 67
@@ -221,10 +89,20 @@ static void mdp5_destroy(struct msm_kms *kms)
221 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 89 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
222 struct msm_mmu *mmu = mdp5_kms->mmu; 90 struct msm_mmu *mmu = mdp5_kms->mmu;
223 91
92 mdp5_irq_domain_fini(mdp5_kms);
93
224 if (mmu) { 94 if (mmu) {
225 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); 95 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
 		mmu->funcs->destroy(mmu);
 	}
+
+	if (mdp5_kms->ctlm)
+		mdp5_ctlm_destroy(mdp5_kms->ctlm);
+	if (mdp5_kms->smp)
+		mdp5_smp_destroy(mdp5_kms->smp);
+	if (mdp5_kms->cfg)
+		mdp5_cfg_destroy(mdp5_kms->cfg);
+
 	kfree(mdp5_kms);
 }
 
@@ -274,17 +152,31 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 	static const enum mdp5_pipe crtcs[] = {
 			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
 	};
+	static const enum mdp5_pipe pub_planes[] = {
+			SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
+	};
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_encoder *encoder;
+	const struct mdp5_cfg_hw *hw_cfg;
 	int i, ret;
 
-	/* construct CRTCs: */
-	for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+	/* register our interrupt-controller for hdmi/eDP/dsi/etc
+	 * to use for irqs routed through mdp:
+	 */
+	ret = mdp5_irq_domain_init(mdp5_kms);
+	if (ret)
+		goto fail;
+
+	/* construct CRTCs and their private planes: */
+	for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
 		struct drm_plane *plane;
 		struct drm_crtc *crtc;
 
-		plane = mdp5_plane_init(dev, crtcs[i], true);
+		plane = mdp5_plane_init(dev, crtcs[i], true,
+				hw_cfg->pipe_rgb.base[i]);
 		if (IS_ERR(plane)) {
 			ret = PTR_ERR(plane);
 			dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -302,6 +194,20 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 		priv->crtcs[priv->num_crtcs++] = crtc;
 	}
 
+	/* Construct public planes: */
+	for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
+		struct drm_plane *plane;
+
+		plane = mdp5_plane_init(dev, pub_planes[i], false,
+				hw_cfg->pipe_vig.base[i]);
+		if (IS_ERR(plane)) {
+			ret = PTR_ERR(plane);
+			dev_err(dev->dev, "failed to construct %s plane: %d\n",
+					pipe2name(pub_planes[i]), ret);
+			goto fail;
+		}
+	}
+
 	/* Construct encoder for HDMI: */
 	encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
 	if (IS_ERR(encoder)) {
@@ -324,11 +230,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 	priv->encoders[priv->num_encoders++] = encoder;
 
 	/* Construct bridge/connector for HDMI: */
-	mdp5_kms->hdmi = hdmi_init(dev, encoder);
-	if (IS_ERR(mdp5_kms->hdmi)) {
-		ret = PTR_ERR(mdp5_kms->hdmi);
-		dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
-		goto fail;
+	if (priv->hdmi) {
+		ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
+		if (ret) {
+			dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+			goto fail;
+		}
 	}
 
 	return 0;
@@ -337,6 +244,21 @@ fail:
 	return ret;
 }
 
+static void read_hw_revision(struct mdp5_kms *mdp5_kms,
+		uint32_t *major, uint32_t *minor)
+{
+	uint32_t version;
+
+	mdp5_enable(mdp5_kms);
+	version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
+	mdp5_disable(mdp5_kms);
+
+	*major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
+	*minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
+
+	DBG("MDP5 version v%d.%d", *major, *minor);
+}
+
 static int get_clk(struct platform_device *pdev, struct clk **clkp,
 		const char *name)
 {
@@ -353,10 +275,11 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
 struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 {
 	struct platform_device *pdev = dev->platformdev;
-	struct mdp5_platform_config *config = mdp5_get_config(pdev);
+	struct mdp5_cfg *config;
 	struct mdp5_kms *mdp5_kms;
 	struct msm_kms *kms = NULL;
 	struct msm_mmu *mmu;
+	uint32_t major, minor;
 	int i, ret;
 
 	mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
@@ -366,12 +289,13 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
+	spin_lock_init(&mdp5_kms->resource_lock);
+
 	mdp_kms_init(&mdp5_kms->base, &kms_funcs);
 
 	kms = &mdp5_kms->base.base;
 
 	mdp5_kms->dev = dev;
-	mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
 
 	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
 	if (IS_ERR(mdp5_kms->mmio)) {
@@ -416,24 +340,52 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	if (ret)
 		goto fail;
 
-	ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+	/* we need to set a default rate before enabling. Set a safe
+	 * rate first, then figure out hw revision, and then set a
+	 * more optimal rate:
+	 */
+	clk_set_rate(mdp5_kms->src_clk, 200000000);
+
+	read_hw_revision(mdp5_kms, &major, &minor);
 
-	ret = mdp5_select_hw_cfg(kms);
-	if (ret)
+	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
+	if (IS_ERR(mdp5_kms->cfg)) {
+		ret = PTR_ERR(mdp5_kms->cfg);
+		mdp5_kms->cfg = NULL;
 		goto fail;
+	}
+
+	config = mdp5_cfg_get_config(mdp5_kms->cfg);
+
+	/* TODO: compute core clock rate at runtime */
+	clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
+
+	mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
+	if (IS_ERR(mdp5_kms->smp)) {
+		ret = PTR_ERR(mdp5_kms->smp);
+		mdp5_kms->smp = NULL;
+		goto fail;
+	}
+
+	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+	if (IS_ERR(mdp5_kms->ctlm)) {
+		ret = PTR_ERR(mdp5_kms->ctlm);
+		mdp5_kms->ctlm = NULL;
+		goto fail;
+	}
 
 	/* make sure things are off before attaching iommu (bootloader could
 	 * have left things on, in which case we'll start getting faults if
 	 * we don't disable):
 	 */
 	mdp5_enable(mdp5_kms);
-	for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
+	for (i = 0; i < config->hw->intf.count; i++)
 		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
 	mdp5_disable(mdp5_kms);
 	mdelay(16);
 
-	if (config->iommu) {
-		mmu = msm_iommu_new(&pdev->dev, config->iommu);
+	if (config->platform.iommu) {
+		mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
 			dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -474,18 +426,3 @@ fail:
 	mdp5_destroy(kms);
 	return ERR_PTR(ret);
 }
-
-static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
-{
-	static struct mdp5_platform_config config = {};
-#ifdef CONFIG_OF
-	/* TODO */
-#endif
-	config.iommu = iommu_domain_alloc(&platform_bus_type);
-	/* TODO hard-coded in downstream mdss, but should it be? */
-	config.max_clk = 200000000;
-	/* TODO get from DT: */
-	config.smp_blk_cnt = 22;
-
-	return &config;
-}
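
The probe sequence above is: set a conservative core clock, read REG_MDP5_MDP_VERSION via read_hw_revision(), pick the per-revision config, then re-clock and bring up the SMP and CTL managers. The FIELD() decode used there is a plain mask-and-shift; a minimal standalone sketch of that pattern follows (the field placement below is an illustrative assumption, not the real MDP5 register layout, which lives in the generated mdp5.xml.h headers):

```c
/* Standalone sketch of the FIELD()-style decode behind read_hw_revision().
 * The masks/shifts here are made-up placeholders for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define MDP_VERSION_MAJOR__MASK		0xf0000000u	/* assumed placement */
#define MDP_VERSION_MAJOR__SHIFT	28
#define MDP_VERSION_MINOR__MASK		0x0fff0000u	/* assumed placement */
#define MDP_VERSION_MINOR__SHIFT	16

/* FIELD(val, name): mask out the field, then shift it down to bit 0 */
#define FIELD(val, name)	(((val) & name##__MASK) >> name##__SHIFT)

int main(void)
{
	uint32_t version = 0x10020000;	/* stand-in for a register read */

	printf("MDP5 version v%u.%u\n",
			FIELD(version, MDP_VERSION_MAJOR),
			FIELD(version, MDP_VERSION_MINOR));
	return 0;
}
```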
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 5bf340dd0f00..dd69c77c0d64 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -21,25 +21,9 @@
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "mdp/mdp_kms.h"
-/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */
-#define MDP5_MAX_BASES		8
-struct mdp5_sub_block {
-	int count;
-	uint32_t base[MDP5_MAX_BASES];
-};
-struct mdp5_config {
-	char *name;
-	struct mdp5_sub_block ctl;
-	struct mdp5_sub_block pipe_vig;
-	struct mdp5_sub_block pipe_rgb;
-	struct mdp5_sub_block pipe_dma;
-	struct mdp5_sub_block lm;
-	struct mdp5_sub_block dspp;
-	struct mdp5_sub_block ad;
-	struct mdp5_sub_block intf;
-};
-extern const struct mdp5_config *mdp5_cfg;
+#include "mdp5_cfg.h"	/* must be included before mdp5.xml.h */
 #include "mdp5.xml.h"
+#include "mdp5_ctl.h"
 #include "mdp5_smp.h"
 
 struct mdp5_kms {
@@ -47,17 +31,14 @@ struct mdp5_kms {
 
 	struct drm_device *dev;
 
-	int rev;
-	const struct mdp5_config *hw_cfg;
+	struct mdp5_cfg_handler *cfg;
 
 	/* mapper-id used to request GEM buffer mapped for scanout: */
 	int id;
 	struct msm_mmu *mmu;
 
-	/* for tracking smp allocation amongst pipes: */
-	mdp5_smp_state_t smp_state;
-	struct mdp5_client_smp_state smp_client_state[CID_MAX];
-	int smp_blk_cnt;
+	struct mdp5_smp *smp;
+	struct mdp5_ctl_manager *ctlm;
 
 	/* io/register spaces: */
 	void __iomem *mmio, *vbif;
@@ -71,18 +52,47 @@ struct mdp5_kms {
 	struct clk *lut_clk;
 	struct clk *vsync_clk;
 
-	struct hdmi *hdmi;
+	/*
+	 * lock to protect access to global resources, i.e. the following register:
+	 * - REG_MDP5_DISP_INTF_SEL
+	 */
+	spinlock_t resource_lock;
 
 	struct mdp_irq error_handler;
+
+	struct {
+		volatile unsigned long enabled_mask;
+		struct irq_domain *domain;
+	} irqcontroller;
 };
 #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
 
-/* platform config data (ie. from DT, or pdata) */
-struct mdp5_platform_config {
-	struct iommu_domain *iommu;
-	uint32_t max_clk;
-	int smp_blk_cnt;
+struct mdp5_plane_state {
+	struct drm_plane_state base;
+
+	/* "virtual" zpos.. we calculate the actual mixer-stage at runtime
+	 * by sorting the attached planes by zpos and then assigning
+	 * mixer stage lowest to highest. Private planes get a default
+	 * zpos of zero, and public planes a unique value that is
+	 * greater than zero. This way, things work out if a naive
+	 * userspace assigns planes to a crtc without setting zpos.
+	 */
+	int zpos;
+
+	/* the actual mixer stage, calculated in crtc->atomic_check()
+	 * NOTE: this should move to mdp5_crtc_state, when that exists
+	 */
+	enum mdp_mixer_stage_id stage;
+
+	/* some additional transactional status to help us know in the
+	 * apply path whether we need to update SMP allocation, and
+	 * whether the current update is still pending:
+	 */
+	bool mode_changed : 1;
+	bool pending : 1;
 };
+#define to_mdp5_plane_state(x) \
+		container_of(x, struct mdp5_plane_state, base)
 
 static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
 {
@@ -107,23 +117,6 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
 	return names[pipe];
 }
 
-static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
-{
-	switch (pipe) {
-	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
-	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
-	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
-	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
-	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
-	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
-	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
-	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
-	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
-	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
-	default: return 0;
-	}
-}
-
 static inline int pipe2nclients(enum mdp5_pipe pipe)
 {
 	switch (pipe) {
@@ -137,34 +130,6 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
 	}
 }
 
-static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
-{
-	WARN_ON(plane >= pipe2nclients(pipe));
-	switch (pipe) {
-	case SSPP_VIG0: return CID_VIG0_Y + plane;
-	case SSPP_VIG1: return CID_VIG1_Y + plane;
-	case SSPP_VIG2: return CID_VIG2_Y + plane;
-	case SSPP_RGB0: return CID_RGB0;
-	case SSPP_RGB1: return CID_RGB1;
-	case SSPP_RGB2: return CID_RGB2;
-	case SSPP_DMA0: return CID_DMA0_Y + plane;
-	case SSPP_DMA1: return CID_DMA1_Y + plane;
-	case SSPP_VIG3: return CID_VIG3_Y + plane;
-	case SSPP_RGB3: return CID_RGB3;
-	default: return CID_UNUSED;
-	}
-}
-
-static inline uint32_t mixer2flush(int lm)
-{
-	switch (lm) {
-	case 0: return MDP5_CTL_FLUSH_LM0;
-	case 1: return MDP5_CTL_FLUSH_LM1;
-	case 2: return MDP5_CTL_FLUSH_LM2;
-	default: return 0;
-	}
-}
-
 static inline uint32_t intf2err(int intf)
 {
 	switch (intf) {
@@ -197,6 +162,8 @@ void mdp5_irq_uninstall(struct msm_kms *kms);
 irqreturn_t mdp5_irq(struct msm_kms *kms);
 int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
 void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
+void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
 static inline
 uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
@@ -210,26 +177,18 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
 
 void mdp5_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj);
-void mdp5_plane_set_scanout(struct drm_plane *plane,
-		struct drm_framebuffer *fb);
-int mdp5_plane_mode_set(struct drm_plane *plane,
-		struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		int crtc_x, int crtc_y,
-		unsigned int crtc_w, unsigned int crtc_h,
-		uint32_t src_x, uint32_t src_y,
-		uint32_t src_w, uint32_t src_h);
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 void mdp5_plane_complete_flip(struct drm_plane *plane);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-		enum mdp5_pipe pipe, bool private_plane);
+		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
 
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
+int mdp5_crtc_get_lm(struct drm_crtc *crtc);
 void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 		enum mdp5_intf intf_id);
-void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
-void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, int id);
 
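
The zpos comment in mdp5_plane_state describes a simple policy: sort a CRTC's attached planes by their virtual zpos and hand out mixer stages from lowest to highest, so even a userspace that never sets zpos gets a sane ordering. A rough standalone sketch of that assignment, with simplified stand-in types in place of the real drm/mdp5 structs:

```c
/* Sketch of the "virtual zpos" -> mixer-stage assignment described above.
 * Types and stage numbering are illustrative stand-ins, not driver code.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_plane_state { const char *name; int zpos; int stage; };

static int cmp_zpos(const void *a, const void *b)
{
	const struct fake_plane_state *pa = a, *pb = b;
	return pa->zpos - pb->zpos;
}

int main(void)
{
	/* private (primary) plane gets zpos 0; public planes unique values > 0 */
	struct fake_plane_state planes[] = {
		{ "rgb0 (primary)", 0, -1 },
		{ "vig1",           3, -1 },
		{ "vig0",           2, -1 },
	};
	int i, n = sizeof(planes) / sizeof(planes[0]);

	qsort(planes, n, sizeof(planes[0]), cmp_zpos);
	for (i = 0; i < n; i++) {
		planes[i].stage = i;	/* stages handed out lowest to highest */
		printf("%s: zpos=%d -> stage %d\n",
				planes[i].name, planes[i].zpos, planes[i].stage);
	}
	return 0;
}
```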
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index f3daec4412ad..26e5fdea6594 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -17,6 +18,7 @@
 
 #include "mdp5_kms.h"
 
+#define MAX_PLANE	4
 
 struct mdp5_plane {
 	struct drm_plane base;
@@ -24,6 +26,11 @@ struct mdp5_plane {
 
 	enum mdp5_pipe pipe;
 
+	spinlock_t pipe_lock;	/* protect REG_MDP5_PIPE_* registers */
+	uint32_t reg_offset;
+
+	uint32_t flush_mask;	/* used to commit pipe registers */
+
 	uint32_t nformats;
 	uint32_t formats[32];
 
@@ -31,31 +38,24 @@ struct mdp5_plane {
 };
 #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
 
+static int mdp5_plane_mode_set(struct drm_plane *plane,
+		struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		int crtc_x, int crtc_y,
+		unsigned int crtc_w, unsigned int crtc_h,
+		uint32_t src_x, uint32_t src_y,
+		uint32_t src_w, uint32_t src_h);
+static void set_scanout_locked(struct drm_plane *plane,
+		struct drm_framebuffer *fb);
+
 static struct mdp5_kms *get_kms(struct drm_plane *plane)
 {
 	struct msm_drm_private *priv = plane->dev->dev_private;
 	return to_mdp5_kms(to_mdp_kms(priv->kms));
 }
 
-static int mdp5_plane_update(struct drm_plane *plane,
-		struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		int crtc_x, int crtc_y,
-		unsigned int crtc_w, unsigned int crtc_h,
-		uint32_t src_x, uint32_t src_y,
-		uint32_t src_w, uint32_t src_h)
+static bool plane_enabled(struct drm_plane_state *state)
 {
-	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-
-	mdp5_plane->enabled = true;
-
-	if (plane->fb)
-		drm_framebuffer_unreference(plane->fb);
-
-	drm_framebuffer_reference(fb);
-
-	return mdp5_plane_mode_set(plane, crtc, fb,
-			crtc_x, crtc_y, crtc_w, crtc_h,
-			src_x, src_y, src_w, src_h);
+	return state->fb && state->crtc;
 }
 
 static int mdp5_plane_disable(struct drm_plane *plane)
@@ -63,21 +63,13 @@ static int mdp5_plane_disable(struct drm_plane *plane)
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
 	enum mdp5_pipe pipe = mdp5_plane->pipe;
-	int i;
 
 	DBG("%s: disable", mdp5_plane->name);
 
-	/* update our SMP request to zero (release all our blks): */
-	for (i = 0; i < pipe2nclients(pipe); i++)
-		mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0);
-
-	/* TODO detaching now will cause us not to get the last
-	 * vblank and mdp5_smp_commit().. so other planes will
-	 * still see smp blocks previously allocated to us as
-	 * in-use..
-	 */
-	if (plane->crtc)
-		mdp5_crtc_detach(plane->crtc, plane);
+	if (mdp5_kms) {
+		/* Release the memory we requested earlier from the SMP: */
+		mdp5_smp_release(mdp5_kms->smp, pipe);
+	}
 
 	return 0;
 }
@@ -85,11 +77,8 @@ static int mdp5_plane_disable(struct drm_plane *plane)
 static void mdp5_plane_destroy(struct drm_plane *plane)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-	struct msm_drm_private *priv = plane->dev->dev_private;
-
-	if (priv->kms)
-		mdp5_plane_disable(plane);
 
+	drm_plane_helper_disable(plane);
 	drm_plane_cleanup(plane);
 
 	kfree(mdp5_plane);
@@ -109,109 +98,186 @@ int mdp5_plane_set_property(struct drm_plane *plane,
 	return -EINVAL;
 }
 
+static void mdp5_plane_reset(struct drm_plane *plane)
+{
+	struct mdp5_plane_state *mdp5_state;
+
+	if (plane->state && plane->state->fb)
+		drm_framebuffer_unreference(plane->state->fb);
+
+	kfree(to_mdp5_plane_state(plane->state));
+	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+
+	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+		mdp5_state->zpos = 0;
+	} else {
+		mdp5_state->zpos = 1 + drm_plane_index(plane);
+	}
+
+	plane->state = &mdp5_state->base;
+}
+
+static struct drm_plane_state *
+mdp5_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct mdp5_plane_state *mdp5_state;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
+
+	mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
+			sizeof(*mdp5_state), GFP_KERNEL);
+
+	if (mdp5_state && mdp5_state->base.fb)
+		drm_framebuffer_reference(mdp5_state->base.fb);
+
+	mdp5_state->mode_changed = false;
+	mdp5_state->pending = false;
+
+	return &mdp5_state->base;
+}
+
+static void mdp5_plane_destroy_state(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	if (state->fb)
+		drm_framebuffer_unreference(state->fb);
+
+	kfree(to_mdp5_plane_state(state));
+}
+
 static const struct drm_plane_funcs mdp5_plane_funcs = {
-		.update_plane = mdp5_plane_update,
-		.disable_plane = mdp5_plane_disable,
+		.update_plane = drm_atomic_helper_update_plane,
+		.disable_plane = drm_atomic_helper_disable_plane,
 		.destroy = mdp5_plane_destroy,
 		.set_property = mdp5_plane_set_property,
+		.reset = mdp5_plane_reset,
+		.atomic_duplicate_state = mdp5_plane_duplicate_state,
+		.atomic_destroy_state = mdp5_plane_destroy_state,
 };
 
-void mdp5_plane_set_scanout(struct drm_plane *plane,
+static int mdp5_plane_prepare_fb(struct drm_plane *plane,
 		struct drm_framebuffer *fb)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
-	enum mdp5_pipe pipe = mdp5_plane->pipe;
-	uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
-	uint32_t iova[4];
-	int i;
-
-	for (i = 0; i < nplanes; i++) {
-		struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
-		msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
-	}
-	for (; i < 4; i++)
-		iova[i] = 0;
 
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
-			MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
-			MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
-
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
-			MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
-			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
-
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
-
-	plane->fb = fb;
+	DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
+	return msm_framebuffer_prepare(fb, mdp5_kms->id);
 }
 
-/* NOTE: looks like if horizontal decimation is used (if we supported that)
- * then the width used to calculate SMP block requirements is the post-
- * decimated width. Ie. SMP buffering sits downstream of decimation (which
- * presumably happens during the dma from scanout buffer).
- */
-static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
-		uint32_t nplanes, uint32_t width)
+static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
		struct drm_framebuffer *fb)
 {
-	struct drm_device *dev = plane->dev;
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
-	enum mdp5_pipe pipe = mdp5_plane->pipe;
-	int i, hsub, nlines, nblks, ret;
 
-	hsub = drm_format_horz_chroma_subsampling(format);
+	DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
+	msm_framebuffer_cleanup(fb, mdp5_kms->id);
+}
 
-	/* different if BWC (compressed framebuffer?) enabled: */
-	nlines = 2;
+static int mdp5_plane_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	struct drm_plane_state *old_state = plane->state;
 
-	for (i = 0, nblks = 0; i < nplanes; i++) {
-		int n, fetch_stride, cpp;
+	DBG("%s: check (%d -> %d)", mdp5_plane->name,
+			plane_enabled(old_state), plane_enabled(state));
 
-		cpp = drm_format_plane_cpp(format, i);
-		fetch_stride = width * cpp / (i ? hsub : 1);
+	if (plane_enabled(state) && plane_enabled(old_state)) {
+		/* we cannot change SMP block configuration during scanout: */
+		bool full_modeset = false;
+		if (state->fb->pixel_format != old_state->fb->pixel_format) {
+			DBG("%s: pixel_format change!", mdp5_plane->name);
+			full_modeset = true;
+		}
+		if (state->src_w != old_state->src_w) {
+			DBG("%s: src_w change!", mdp5_plane->name);
+			full_modeset = true;
+		}
+		if (to_mdp5_plane_state(old_state)->pending) {
+			DBG("%s: still pending!", mdp5_plane->name);
+			full_modeset = true;
+		}
+		if (full_modeset) {
+			struct drm_crtc_state *crtc_state =
+					drm_atomic_get_crtc_state(state->state, state->crtc);
+			crtc_state->mode_changed = true;
+			to_mdp5_plane_state(state)->mode_changed = true;
+		}
+	} else {
+		to_mdp5_plane_state(state)->mode_changed = true;
+	}
 
-		n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE);
+	return 0;
+}
 
-		/* for hw rev v1.00 */
-		if (mdp5_kms->rev == 0)
-			n = roundup_pow_of_two(n);
+static void mdp5_plane_atomic_update(struct drm_plane *plane,
+		struct drm_plane_state *old_state)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	struct drm_plane_state *state = plane->state;
 
-		DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n);
-		ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
-		if (ret) {
-			dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
-					n, ret);
-			return ret;
-		}
+	DBG("%s: update", mdp5_plane->name);
 
-		nblks += n;
+	if (!plane_enabled(state)) {
+		to_mdp5_plane_state(state)->pending = true;
+		mdp5_plane_disable(plane);
+	} else if (to_mdp5_plane_state(state)->mode_changed) {
+		int ret;
+		to_mdp5_plane_state(state)->pending = true;
+		ret = mdp5_plane_mode_set(plane,
+				state->crtc, state->fb,
+				state->crtc_x, state->crtc_y,
+				state->crtc_w, state->crtc_h,
+				state->src_x, state->src_y,
+				state->src_w, state->src_h);
+		/* atomic_check should have ensured that this doesn't fail */
+		WARN_ON(ret < 0);
+	} else {
+		unsigned long flags;
+		spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+		set_scanout_locked(plane, state->fb);
+		spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
 	}
-
-	/* in success case, return total # of blocks allocated: */
-	return nblks;
 }
 
-static void set_fifo_thresholds(struct drm_plane *plane, int nblks)
+static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
+		.prepare_fb = mdp5_plane_prepare_fb,
+		.cleanup_fb = mdp5_plane_cleanup_fb,
+		.atomic_check = mdp5_plane_atomic_check,
+		.atomic_update = mdp5_plane_atomic_update,
+};
+
+static void set_scanout_locked(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
 {
 	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
 	enum mdp5_pipe pipe = mdp5_plane->pipe;
-	uint32_t val;
 
-	/* 1/4 of SMP pool that is being fetched */
-	val = (nblks * SMP_ENTRIES_PER_BLK) / 4;
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
+			MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+			MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
 
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
+			MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
+			msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
+			msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
+			msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
+			msm_framebuffer_iova(fb, mdp5_kms->id, 3));
 
+	plane->fb = fb;
 }
 
-int mdp5_plane_mode_set(struct drm_plane *plane,
+static int mdp5_plane_mode_set(struct drm_plane *plane,
 		struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		int crtc_x, int crtc_y,
 		unsigned int crtc_w, unsigned int crtc_h,
@@ -225,7 +291,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 	uint32_t nplanes, config = 0;
 	uint32_t phasex_step = 0, phasey_step = 0;
 	uint32_t hdecm = 0, vdecm = 0;
-	int i, nblks;
+	unsigned long flags;
+	int ret;
 
 	nplanes = drm_format_num_planes(fb->pixel_format);
 
@@ -243,12 +310,11 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 			fb->base.id, src_x, src_y, src_w, src_h,
 			crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
 
-	/*
-	 * Calculate and request required # of smp blocks:
-	 */
-	nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w);
-	if (nblks < 0)
-		return nblks;
+	/* Request some memory from the SMP: */
+	ret = mdp5_smp_request(mdp5_kms->smp,
+			mdp5_plane->pipe, fb->pixel_format, src_w);
+	if (ret)
+		return ret;
 
 	/*
 	 * Currently we update the hw for allocations/requests immediately,
@@ -256,8 +322,7 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 	 * would move into atomic->check_plane_state(), while updating the
 	 * hw would remain here:
 	 */
-	for (i = 0; i < pipe2nclients(pipe); i++)
-		mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
+	mdp5_smp_configure(mdp5_kms->smp, pipe);
 
 	if (src_w != crtc_w) {
 		config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
@@ -269,6 +334,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 		/* TODO calc phasey_step, vdecm */
 	}
 
+	spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
 			MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
 			MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
@@ -289,8 +356,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 			MDP5_PIPE_OUT_XY_X(crtc_x) |
 			MDP5_PIPE_OUT_XY_Y(crtc_y));
 
-	mdp5_plane_set_scanout(plane, fb);
-
 	format = to_mdp_format(msm_framebuffer_format(fb));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
@@ -330,22 +395,24 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 			MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
 			MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
 
-	set_fifo_thresholds(plane, nblks);
+	set_scanout_locked(plane, fb);
 
-	/* TODO detach from old crtc (if we had more than one) */
-	mdp5_crtc_attach(crtc, plane);
+	spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
 
-	return 0;
+	return ret;
 }
 
 void mdp5_plane_complete_flip(struct drm_plane *plane)
 {
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
-	enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe;
-	int i;
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+	DBG("%s: complete flip", mdp5_plane->name);
 
-	for (i = 0; i < pipe2nclients(pipe); i++)
-		mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i));
+	mdp5_smp_commit(mdp5_kms->smp, pipe);
+
+	to_mdp5_plane_state(plane->state)->pending = false;
 }
 
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
@@ -354,9 +421,16 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
 	return mdp5_plane->pipe;
 }
 
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+	return mdp5_plane->flush_mask;
+}
+
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-		enum mdp5_pipe pipe, bool private_plane)
+		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
 {
 	struct drm_plane *plane = NULL;
 	struct mdp5_plane *mdp5_plane;
@@ -377,10 +451,18 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 	mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
 			ARRAY_SIZE(mdp5_plane->formats));
 
+	mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
+	mdp5_plane->reg_offset = reg_offset;
+	spin_lock_init(&mdp5_plane->pipe_lock);
+
 	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
-	drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
+	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
 			mdp5_plane->formats, mdp5_plane->nformats,
 			type);
+	if (ret)
+		goto fail;
+
+	drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
 
 	mdp5_plane_install_properties(plane, &plane->base);
 
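
mdp5_plane_duplicate_state() and mdp5_plane_destroy_state() above follow the usual subclassed-atomic-state pattern: copy the whole driver state, re-take a reference on the framebuffer, and reset the per-commit flags so the duplicate starts clean. A stripped-down sketch of that lifecycle, with a plain refcount standing in for drm_framebuffer (all types here are illustrative stand-ins):

```c
/* Sketch of the duplicate/destroy state pattern used above, outside the
 * kernel. A bare counter models drm_framebuffer_reference()/_unreference().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_fb { int refcount; };

struct base_state { struct fake_fb *fb; };
struct driver_state {
	struct base_state base;	/* must stay first: container_of-style cast */
	int zpos;
	int mode_changed;
	int pending;
};

static struct driver_state *duplicate_state(const struct driver_state *old)
{
	struct driver_state *dup = malloc(sizeof(*dup));
	if (!dup)
		return NULL;
	memcpy(dup, old, sizeof(*dup));	/* kmemdup() equivalent */
	if (dup->base.fb)
		dup->base.fb->refcount++;	/* take fb reference */
	dup->mode_changed = 0;			/* transactional flags reset */
	dup->pending = 0;
	return dup;
}

static void destroy_state(struct driver_state *st)
{
	if (st->base.fb)
		st->base.fb->refcount--;	/* drop fb reference */
	free(st);
}

int main(void)
{
	struct fake_fb fb = { .refcount = 1 };
	struct driver_state old = { { &fb }, 2, 1, 1 };
	struct driver_state *dup = duplicate_state(&old);

	printf("fb refcount after duplicate: %d (zpos carried over: %d)\n",
			fb.refcount, dup->zpos);
	destroy_state(dup);
	printf("fb refcount after destroy: %d\n", fb.refcount);
	return 0;
}
```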
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 2d0236b963a6..bf551885e019 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -29,8 +30,11 @@
  * Based on the size of the attached scanout buffer, a certain # of
  * blocks must be allocated to that client out of the shared pool.
  *
- * For each block, it can be either free, or pending/in-use by a
- * client. The updates happen in three steps:
+ * In some hw, some blocks are statically allocated for certain pipes
+ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
+ *
+ * For each block that can be dynamically allocated, it can be either
+ * free, or pending/in-use by a client. The updates happen in three steps:
  *
  *  1) mdp5_smp_request():
  *     When plane scanout is setup, calculate required number of
@@ -61,21 +65,68 @@
  * inuse and pending state of all clients..
  */
 
-static DEFINE_SPINLOCK(smp_lock);
+struct mdp5_smp {
+	struct drm_device *dev;
+
+	int blk_cnt;
+	int blk_size;
+
+	spinlock_t state_lock;
+	mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
+
+	struct mdp5_client_smp_state client_state[CID_MAX];
+};
 
+static inline
+struct mdp5_kms *get_kms(struct mdp5_smp *smp)
+{
+	struct msm_drm_private *priv = smp->dev->dev_private;
+
+	return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
+{
+	WARN_ON(plane >= pipe2nclients(pipe));
+	switch (pipe) {
+	case SSPP_VIG0: return CID_VIG0_Y + plane;
+	case SSPP_VIG1: return CID_VIG1_Y + plane;
+	case SSPP_VIG2: return CID_VIG2_Y + plane;
+	case SSPP_RGB0: return CID_RGB0;
+	case SSPP_RGB1: return CID_RGB1;
+	case SSPP_RGB2: return CID_RGB2;
+	case SSPP_DMA0: return CID_DMA0_Y + plane;
+	case SSPP_DMA1: return CID_DMA1_Y + plane;
+	case SSPP_VIG3: return CID_VIG3_Y + plane;
+	case SSPP_RGB3: return CID_RGB3;
+	default: return CID_UNUSED;
+	}
+}
 
 /* step #1: update # of blocks pending for the client: */
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
+static int smp_request_block(struct mdp5_smp *smp,
 		enum mdp5_client_id cid, int nblks)
 {
-	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
-	int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt;
+	struct mdp5_kms *mdp5_kms = get_kms(smp);
+	const struct mdp5_cfg_hw *hw_cfg;
+	struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+	int i, ret = 0, avail, cur_nblks, cnt = smp->blk_cnt;
+	int reserved;
 	unsigned long flags;
 
-	spin_lock_irqsave(&smp_lock, flags);
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+	reserved = hw_cfg->smp.reserved[cid];
+
+	spin_lock_irqsave(&smp->state_lock, flags);
 
-	avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt);
+	nblks -= reserved;
+	if (reserved)
+		DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
+
+	avail = cnt - bitmap_weight(smp->state, cnt);
 	if (nblks > avail) {
+		dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
+				nblks, avail);
 		ret = -ENOSPC;
 		goto fail;
 	}
@@ -84,9 +135,9 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
 	if (nblks > cur_nblks) {
 		/* grow the existing pending reservation: */
 		for (i = cur_nblks; i < nblks; i++) {
-			int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt);
+			int blk = find_first_zero_bit(smp->state, cnt);
 			set_bit(blk, ps->pending);
-			set_bit(blk, mdp5_kms->smp_state);
+			set_bit(blk, smp->state);
 		}
 	} else {
 		/* shrink the existing pending reservation: */
@@ -98,15 +149,88 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
 	}
 
 fail:
-	spin_unlock_irqrestore(&smp_lock, flags);
+	spin_unlock_irqrestore(&smp->state_lock, flags);
+	return ret;
+}
+
+static void set_fifo_thresholds(struct mdp5_smp *smp,
+		enum mdp5_pipe pipe, int nblks)
+{
+	struct mdp5_kms *mdp5_kms = get_kms(smp);
+	u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
+	u32 val;
+
+	/* 1/4 of SMP pool that is being fetched */
+	val = (nblks * smp_entries_per_blk) / 4;
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
+}
+
+/*
+ * NOTE: looks like if horizontal decimation is used (if we supported that)
+ * then the width used to calculate SMP block requirements is the post-
+ * decimated width. Ie. SMP buffering sits downstream of decimation (which
+ * presumably happens during the dma from scanout buffer).
+ */
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
+{
+	struct mdp5_kms *mdp5_kms = get_kms(smp);
+	struct drm_device *dev = mdp5_kms->dev;
+	int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
+	int i, hsub, nplanes, nlines, nblks, ret;
+
+	nplanes = drm_format_num_planes(fmt);
+	hsub = drm_format_horz_chroma_subsampling(fmt);
+
+	/* different if BWC (compressed framebuffer?) enabled: */
+	nlines = 2;
+
+	for (i = 0, nblks = 0; i < nplanes; i++) {
+		int n, fetch_stride, cpp;
+
+		cpp = drm_format_plane_cpp(fmt, i);
+		fetch_stride = width * cpp / (i ? hsub : 1);
+
+		n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
+
+		/* for hw rev v1.00 */
+		if (rev == 0)
+			n = roundup_pow_of_two(n);
+
+		DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
+		ret = smp_request_block(smp, pipe2client(pipe, i), n);
+		if (ret) {
+			dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+					n, ret);
+			return ret;
+		}
+
+		nblks += n;
+	}
+
+	set_fifo_thresholds(smp, pipe, nblks);
+
 	return 0;
 }
 
-static void update_smp_state(struct mdp5_kms *mdp5_kms,
+/* Release SMP blocks for all clients of the pipe */
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
+{
+	int i, nblks;
+
+	for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
+		smp_request_block(smp, pipe2client(pipe, i), 0);
+	set_fifo_thresholds(smp, pipe, 0);
+}
+
+static void update_smp_state(struct mdp5_smp *smp,
 		enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
 {
-	int cnt = mdp5_kms->smp_blk_cnt;
-	uint32_t blk, val;
+	struct mdp5_kms *mdp5_kms = get_kms(smp);
+	int cnt = smp->blk_cnt;
+	u32 blk, val;
 
 	for_each_set_bit(blk, *assigned, cnt) {
 		int idx = blk / 3;
@@ -135,39 +259,80 @@ static void update_smp_state(struct mdp5_kms *mdp5_kms,
 }
 
 /* step #2: configure hw for union(pending, inuse): */
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
-	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
-	int cnt = mdp5_kms->smp_blk_cnt;
+	int cnt = smp->blk_cnt;
 	mdp5_smp_state_t assigned;
+	int i;
 
-	bitmap_or(assigned, ps->inuse, ps->pending, cnt);
-	update_smp_state(mdp5_kms, cid, &assigned);
+	for (i = 0; i < pipe2nclients(pipe); i++) {
+		enum mdp5_client_id cid = pipe2client(pipe, i);
+		struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+		bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+		update_smp_state(smp, cid, &assigned);
+	}
 }
 
 /* step #3: after vblank, copy pending -> inuse: */
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid)
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
-	struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid];
-	int cnt = mdp5_kms->smp_blk_cnt;
+	int cnt = smp->blk_cnt;
 	mdp5_smp_state_t released;
+	int i;
+
+	for (i = 0; i < pipe2nclients(pipe); i++) {
+		enum mdp5_client_id cid = pipe2client(pipe, i);
+		struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+		/*
+		 * Figure out if there are any blocks we were previously
+		 * using, which can be released and made available to other
+		 * clients:
+		 */
+		if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&smp->state_lock, flags);
+			/* clear released blocks: */
+			bitmap_andnot(smp->state, smp->state, released, cnt);
+			spin_unlock_irqrestore(&smp->state_lock, flags);
 
-	/*
-	 * Figure out if there are any blocks we where previously
-	 * using, which can be released and made available to other
-	 * clients:
-	 */
-	if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&smp_lock, flags);
-		/* clear released blocks: */
-		bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
-				released, cnt);
-		spin_unlock_irqrestore(&smp_lock, flags);
-
-		update_smp_state(mdp5_kms, CID_UNUSED, &released);
+			update_smp_state(smp, CID_UNUSED, &released);
+		}
+
+		bitmap_copy(ps->inuse, ps->pending, cnt);
 	}
+}
+
+void mdp5_smp_destroy(struct mdp5_smp *smp)
+{
+	kfree(smp);
+}
+
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
+{
+	struct mdp5_smp *smp = NULL;
+	int ret;
+
+	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
+	if (unlikely(!smp)) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	smp->dev = dev;
+	smp->blk_cnt = cfg->mmb_count;
+	smp->blk_size = cfg->mmb_size;
+
+	/* statically tied MMBs cannot be re-allocated: */
+	bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
+	spin_lock_init(&smp->state_lock);
+
+	return smp;
+fail:
+	if (smp)
+		mdp5_smp_destroy(smp);
 
-	bitmap_copy(ps->inuse, ps->pending, cnt);
+	return ERR_PTR(ret);
 }
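
To make the sizing in mdp5_smp_request() concrete: each plane of the framebuffer needs ceil(fetch_stride * nlines / blk_size) blocks, where fetch_stride is width * cpp (divided by the horizontal subsampling factor for chroma planes), and v1.0 hardware additionally rounds the count up to a power of two. A standalone rerun of that arithmetic, assuming the common 4096-byte MMB size and a 1920-wide two-plane NV12-style layout:

```c
/* Worked example of the SMP block math from mdp5_smp_request(). The block
 * size and format parameters below are assumptions for illustration.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int roundup_pow_of_two(int n)
{
	int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	int blk_size = 4096, nlines = 2, width = 1920;
	int hsub = 2;			/* 4:2:0 horizontal subsampling */
	int cpp[2] = { 1, 2 };		/* Y: 1 byte; interleaved CbCr: 2 bytes */
	int i, nblks = 0;

	for (i = 0; i < 2; i++) {
		int fetch_stride = width * cpp[i] / (i ? hsub : 1);
		int n = DIV_ROUND_UP(fetch_stride * nlines, blk_size);

		/* hw rev v1.00 can only use power-of-two allocations */
		n = roundup_pow_of_two(n);
		printf("plane %d: stride=%d -> %d block(s)\n",
				i, fetch_stride, n);
		nblks += n;
	}
	printf("total: %d SMP blocks for the pipe\n", nblks);
	return 0;
}
```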
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 0ab739e1a1dd..e47179f63585 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -20,22 +21,26 @@
 
 #include "msm_drv.h"
 
-#define MAX_SMP_BLOCKS  22
-#define SMP_BLK_SIZE    4096
-#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
-
-typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
-
 struct mdp5_client_smp_state {
 	mdp5_smp_state_t inuse;
 	mdp5_smp_state_t pending;
 };
 
 struct mdp5_kms;
+struct mdp5_smp;
+
+/*
+ * SMP module prototypes:
+ * mdp5_smp_init() returns an SMP @handler,
+ * which is then used to call the other mdp5_smp_*(handler, ...) functions.
+ */
 
-int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks);
-void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
-void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
+struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
+void mdp5_smp_destroy(struct mdp5_smp *smp);
 
+int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
+void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
+void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
 
 #endif /* __MDP5_SMP_H__ */
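
Putting the new handler API together, the expected call order mirrors the three-step comment at the top of mdp5_smp.c: request blocks at plane setup, configure the hardware before the flush, and commit after vblank (with release on plane disable). A control-flow-only sketch with the mdp5 types and functions replaced by stubs (nothing below is the real driver code):

```c
/* Call-order sketch for the SMP handler API declared above. All functions
 * here are stubs; the real implementations live in mdp5_smp.c and operate
 * on the handler returned by mdp5_smp_init().
 */
#include <stdio.h>

struct mdp5_smp { int unused; };
enum mdp5_pipe { SSPP_VIG0 };

static int smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe,
		unsigned fmt, unsigned width)
{
	printf("1) request: reserve blocks for width=%u\n", width);
	return 0;
}

static void smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	printf("2) configure: program union(pending, inuse)\n");
}

static void smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
	printf("3) commit: pending -> inuse, free released blocks\n");
}

int main(void)
{
	struct mdp5_smp smp = { 0 };

	if (smp_request(&smp, SSPP_VIG0, 0, 1920))	/* plane mode_set */
		return 1;
	smp_configure(&smp, SSPP_VIG0);			/* before flush */
	smp_commit(&smp, SSPP_VIG0);			/* after vblank */
	return 0;
}
```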