author		Stephane Viau <sviau@codeaurora.org>	2014-11-18 12:49:49 -0500
committer	Rob Clark <robdclark@gmail.com>		2014-11-21 08:57:19 -0500
commit		0deed25b65aaf495e36818481cfc9f58dfa5cd3f (patch)
tree		9cde5045561c7bf864a3c4d09c01ab59e2b1f74e
parent		ac7a570406417e9d837f81c3a6b83fc8d629e583 (diff)
drm/msm: add multiple CRTC and overlay support
MDP5 currently supports only a single CRTC with its private pipe. This change
allows the configuration of multiple CRTCs, with the possibility of attaching
several public planes to these CRTCs.

Signed-off-by: Stephane Viau <sviau@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
-rw-r--r--	drivers/gpu/drm/msm/Makefile                 |   1
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h      |   1
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c     | 271
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c      | 325
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h      | 121
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c  |  13
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c      |  45
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h      |  48
-rw-r--r--	drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c    | 107
9 files changed, 811 insertions(+), 121 deletions(-)
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index dda38529dd56..143d988f8add 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -26,6 +26,7 @@ msm-y := \
 	mdp/mdp4/mdp4_kms.o \
 	mdp/mdp4/mdp4_plane.o \
 	mdp/mdp5/mdp5_cfg.o \
+	mdp/mdp5/mdp5_ctl.o \
 	mdp/mdp5/mdp5_crtc.o \
 	mdp/mdp5/mdp5_encoder.o \
 	mdp/mdp5/mdp5_irq.o \
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index 00c8271ad928..d0c98f9a93e1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -24,6 +24,7 @@
  */
 extern const struct mdp5_cfg_hw *mdp5_cfg;
 
+#define MAX_CTL			8
 #define MAX_BASES		8
 #define MAX_SMP_BLOCKS		44
 #define MAX_CLIENTS		32
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index b7b32c47fd71..85f2fb460a88 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -22,14 +23,21 @@
 #include "drm_crtc_helper.h"
 #include "drm_flip_work.h"
 
+#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+
 struct mdp5_crtc {
 	struct drm_crtc base;
 	char name[8];
 	int id;
 	bool enabled;
 
-	/* which mixer/encoder we route output to: */
-	int mixer;
+	/* layer mixer used for this CRTC (+ its lock): */
+#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
+	int lm;
+	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */
+
+	/* CTL used for this CRTC: */
+	void *ctl;
 
 	/* if there is a pending flip, these will be non-null: */
 	struct drm_pending_vblank_event *event;
@@ -71,25 +79,38 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
 	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
 }
 
-static void crtc_flush(struct drm_crtc *crtc)
+#define mdp5_lm_get_flush(lm)	mdp_ctl_flush_mask_lm(lm)
+
+static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+	DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
+	mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
+}
+
+/*
+ * flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+static void crtc_flush_all(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	int id = mdp5_crtc->id;
 	struct drm_plane *plane;
-	uint32_t flush = 0;
+	uint32_t flush_mask = 0;
+
+	/* we could have already released CTL in the disable path: */
+	if (!mdp5_crtc->ctl)
+		return;
 
 	for_each_plane_on_crtc(crtc, plane) {
-		enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
-		flush |= pipe2flush(pipe);
+		flush_mask |= mdp5_plane_get_flush(plane);
 	}
+	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
 
-	flush |= mixer2flush(mdp5_crtc->id);
-	flush |= MDP5_CTL_FLUSH_CTL;
-
-	DBG("%s: flush=%08x", mdp5_crtc->name, flush);
-
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
+	crtc_flush(crtc, flush_mask);
 }
 
 static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
@@ -117,12 +138,6 @@ static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 
-	/* flush updates, to make sure hw is updated to new scanout fb,
-	 * so that we can safely queue unref to current fb (ie. next
-	 * vblank we know hw is done w/ previous scanout_fb).
-	 */
-	crtc_flush(crtc);
-
 	if (mdp5_crtc->scanout_fb)
 		drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
 				mdp5_crtc->scanout_fb);
@@ -173,6 +188,7 @@ static void pageflip_cb(struct msm_fence_cb *cb)
 	drm_framebuffer_reference(fb);
 	mdp5_plane_set_scanout(crtc->primary, fb);
 	update_scanout(crtc, fb);
+	crtc_flush_all(crtc);
 }
 
 static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -223,41 +239,68 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
+/*
+ * blend_setup() - blend all the planes of a CRTC
+ *
+ * When border is enabled, the border color will ALWAYS be the base layer.
+ * Therefore, the first plane (private RGB pipe) will start at STAGE0.
+ * If disabled, the first plane starts at STAGE_BASE.
+ *
+ * Note:
+ * Border is not enabled here because the private plane is exactly
+ * the CRTC resolution.
+ */
 static void blend_setup(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	int id = mdp5_crtc->id;
+	struct drm_plane *plane;
+	const struct mdp5_cfg_hw *hw_cfg;
+	uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
+	enum mdp_mixer_stage_id stage;
+	unsigned long flags;
+#define blender(stage)	((stage) - STAGE_BASE)
 
-	/*
-	 * Hard-coded setup for now until I figure out how the
-	 * layer-mixer works
-	 */
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg_priv);
 
-	/* LM[id]: */
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id),
-			MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0),
-			MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
-			MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
-			MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
-	mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
-
-	/* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
-	 * we want to be setting CTL[m].LAYER[n].  Not sure what the
-	 * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
-	 * used when chaining up mixers for high resolution displays?
-	 */
+	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+
+	/* ctl could be released already when we are shutting down: */
+	if (!mdp5_crtc->ctl)
+		goto out;
+
+	for_each_plane_on_crtc(crtc, plane) {
+		struct mdp5_overlay_info *overlay;
+
+		overlay = mdp5_plane_get_overlay_info(plane);
+		stage = overlay->zorder;
 
-	/* CTL[id]: */
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0),
-			MDP5_CTL_LAYER_REG_RGB0(STAGE0) |
-			MDP5_CTL_LAYER_REG_BORDER_COLOR);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0);
+		/*
+		 * Note: This cannot happen with current implementation but
+		 * we need to check this condition once z property is added
+		 */
+		BUG_ON(stage > hw_cfg->lm.nb_stages);
+
+		/* LM */
+		mdp5_write(mdp5_kms,
+				REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
+				MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
+				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
+				blender(stage)), 0xff);
+		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
+				blender(stage)), 0x00);
+
+		/* CTL */
+		blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
+		DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
+				pipe2name(mdp5_plane_pipe(plane)), stage);
+	}
+
+	DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
+	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+
+out:
+	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 }
 
 static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
@@ -268,6 +311,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	unsigned long flags;
 	int ret;
 
 	mode = adjusted_mode;
@@ -281,6 +325,13 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 
+	/* request a free CTL, if none is already allocated for this CRTC */
+	if (!mdp5_crtc->ctl) {
+		mdp5_crtc->ctl = mdp5_ctl_request(mdp5_kms->ctl_priv, crtc);
+		if (!mdp5_crtc->ctl)
+			return -EBUSY;
+	}
+
 	/* grab extra ref for update_scanout() */
 	drm_framebuffer_reference(crtc->primary->fb);
 
@@ -295,12 +346,15 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
+	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
+	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
 			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
 			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
 
 	update_fb(crtc, crtc->primary->fb);
 	update_scanout(crtc, crtc->primary->fb);
+	/* crtc_flush_all(crtc) will be called in _commit callback */
 
 	return 0;
 }
@@ -317,7 +371,7 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc)
 static void mdp5_crtc_commit(struct drm_crtc *crtc)
 {
 	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-	crtc_flush(crtc);
+	crtc_flush_all(crtc);
 	/* drop the ref to mdp clk's that we got in prepare: */
 	mdp5_disable(get_kms(crtc));
 }
@@ -343,6 +397,7 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 
 	update_fb(crtc, crtc->primary->fb);
 	update_scanout(crtc, crtc->primary->fb);
+	crtc_flush_all(crtc);
 
 	return 0;
 }
@@ -351,6 +406,19 @@ static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
 {
 }
 
+static void mdp5_crtc_disable(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+	DBG("%s", mdp5_crtc->name);
+
+	if (mdp5_crtc->ctl) {
+		mdp5_ctl_release(mdp5_crtc->ctl);
+		mdp5_crtc->ctl = NULL;
+	}
+}
+
+
 static int mdp5_crtc_page_flip(struct drm_crtc *crtc,
 		struct drm_framebuffer *new_fb,
 		struct drm_pending_vblank_event *event,
@@ -399,6 +467,7 @@ static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
 		.commit = mdp5_crtc_commit,
 		.mode_set_base = mdp5_crtc_mode_set_base,
 		.load_lut = mdp5_crtc_load_lut,
+		.disable = mdp5_crtc_disable,
 };
 
 static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -421,9 +490,8 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
-	struct drm_crtc *crtc = &mdp5_crtc->base;
+
 	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
-	crtc_flush(crtc);
 }
 
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
@@ -444,10 +512,9 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	static const enum mdp5_intfnum intfnum[] = {
-			INTF0, INTF1, INTF2, INTF3,
-	};
+	uint32_t flush_mask = 0;
 	uint32_t intf_sel;
+	unsigned long flags;
 
 	/* now that we know what irq's we want: */
 	mdp5_crtc->err.irqmask = intf2err(intf);
@@ -457,6 +524,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 	if (!mdp5_kms)
 		return;
 
+	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
 	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
 
 	switch (intf) {
@@ -481,16 +549,24 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 		break;
 	}
 
-	blend_setup(crtc);
+	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
+	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
 
 	DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
+	mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
+	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
 
-	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
-	mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
-			MDP5_CTL_OP_MODE(MODE_NONE) |
-			MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+	crtc_flush(crtc, flush_mask);
+}
 
-	crtc_flush(crtc);
+static int count_planes(struct drm_crtc *crtc)
+{
+	struct drm_plane *plane;
+	int cnt = 0;
+	for_each_plane_on_crtc(crtc, plane)
+		cnt++;
+	return cnt;
 }
 
 static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
@@ -498,14 +574,68 @@ static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id,
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 
+	if (plane)
+		plane->crtc = crtc;
+
+	DBG("%s: %d planes attached", mdp5_crtc->name, count_planes(crtc));
+
 	blend_setup(crtc);
-	if (mdp5_crtc->enabled && (plane != crtc->primary))
-		crtc_flush(crtc);
+	if (mdp5_crtc->enabled)
+		crtc_flush_all(crtc);
 }
 
-void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
+int mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
 {
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	struct mdp5_kms *mdp5_kms = get_kms(crtc);
+	struct device *dev = crtc->dev->dev;
+	const struct mdp5_cfg_hw *hw_cfg;
+	bool private_plane = (plane == crtc->primary);
+	struct mdp5_overlay_info overlay_info;
+	enum mdp_mixer_stage_id stage = STAGE_BASE;
+	int max_nb_planes;
+
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg_priv);
+	max_nb_planes = hw_cfg->lm.nb_stages;
+
+	if (count_planes(crtc) >= max_nb_planes) {
+		dev_err(dev, "%s: max # of planes (%d) reached\n",
+				mdp5_crtc->name, max_nb_planes);
+		return -EBUSY;
+	}
+
+	/*
+	 * Set default z-ordering depending on the type of plane
+	 * private -> lower stage
+	 * public  -> topmost stage
+	 *
+	 * TODO: add a property to give userspace an API to change this...
+	 * (will come in a subsequent patch)
+	 */
+	if (private_plane) {
+		stage = STAGE_BASE;
+	} else {
+		struct drm_plane *attached_plane;
+		for_each_plane_on_crtc(crtc, attached_plane) {
+			struct mdp5_overlay_info *overlay;
+
+			if (!attached_plane)
+				continue;
+			overlay = mdp5_plane_get_overlay_info(attached_plane);
+			stage = max(stage, overlay->zorder);
+		}
+		stage++;
+	}
+	overlay_info.zorder = stage;
+	mdp5_plane_set_overlay_info(plane, &overlay_info);
+
+	DBG("%s: %s plane %s set to stage %d by default", mdp5_crtc->name,
+			private_plane ? "private" : "public",
+			pipe2name(mdp5_plane_pipe(plane)), overlay_info.zorder);
+
 	set_attach(crtc, mdp5_plane_pipe(plane), plane);
+
+	return 0;
 }
 
 void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
@@ -516,6 +646,16 @@ void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
 	set_attach(crtc, mdp5_plane_pipe(plane), NULL);
 }
 
+int mdp5_crtc_get_lm(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+	if (WARN_ON(!crtc))
+		return -EINVAL;
+
+	return mdp5_crtc->lm;
+}
+
 /* initialize crtc */
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, int id)
@@ -530,6 +670,9 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	crtc = &mdp5_crtc->base;
 
 	mdp5_crtc->id = id;
+	mdp5_crtc->lm = GET_LM_ID(id);
+
+	spin_lock_init(&mdp5_crtc->lm_lock);
 
 	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
 	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
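Note on the new blending scheme above: blend_setup() walks every plane attached to the CRTC, converts each plane's z-order into a blender slot (blender(stage) = stage - STAGE_BASE), and ORs a per-pipe stage field into a single blend_cfg word that mdp5_ctl_blend() then writes out. A compilable userspace sketch of that accumulation, using hypothetical 3-bit stage fields — the real field layouts come from the generated mdp5.xml.h, not from this example:

#include <stdint.h>
#include <stdio.h>

enum stage { STAGE_BASE = 0, STAGE0, STAGE1, STAGE2 };

/* hypothetical layout: one 3-bit stage slot per pipe in the layer reg */
static uint32_t layer_reg(int pipe_slot, enum stage stage)
{
	return ((uint32_t)stage & 0x7) << (pipe_slot * 3);
}

int main(void)
{
	/* e.g. private RGB0 pipe at the base, public VIG0 stacked above */
	struct { int slot; enum stage z; } planes[] = {
		{ 3 /* RGB0 */, STAGE_BASE },
		{ 0 /* VIG0 */, STAGE0 },
	};
	uint32_t blend_cfg = 0;
	unsigned i;

	for (i = 0; i < sizeof(planes) / sizeof(planes[0]); i++)
		blend_cfg |= layer_reg(planes[i].slot, planes[i].z);

	printf("blend_cfg = 0x%08x\n", blend_cfg);
	return 0;
}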
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
new file mode 100644
index 000000000000..a6155b77cb13
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mdp5_kms.h"
+#include "mdp5_ctl.h"
+
+/*
+ * CTL - MDP Control Pool Manager
+ *
+ * Controls are shared between all CRTCs.
+ *
+ * They are intended to be used for data path configuration.
+ * The top level register programming describes the complete data path for
+ * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
+ *
+ * Hardware capabilities determine the number of concurrent data paths
+ *
+ * In certain use cases (high-resolution dual pipe), one single CTL can be
+ * shared across multiple CRTCs.
+ *
+ * Because the number of CTLs can be less than the number of CRTCs,
+ * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
+ * requested by the client (in mdp5_crtc_mode_set()).
+ */
+
+struct mdp5_ctl {
+	u32 id;
+
+	/* whether this CTL has been allocated or not: */
+	bool busy;
+
+	/* memory output connection (@see mdp5_ctl_mode): */
+	u32 mode;
+
+	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
+	spinlock_t hw_lock;
+	u32 reg_offset;
+
+	/* flush mask used to commit CTL registers */
+	u32 flush_mask;
+
+	bool cursor_on;
+	void *crtc;
+};
+
+struct mdp5_ctl_manager {
+	struct drm_device *dev;
+
+	/* number of CTL / Layer Mixers in this hw config: */
+	u32 nlm;
+	u32 nctl;
+
+	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
+	spinlock_t pool_lock;
+	struct mdp5_ctl ctls[MAX_CTL];
+};
+
+static struct mdp5_ctl_manager mdp5_ctl_mgr;
+
+static inline
+struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
+{
+	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
+
+	return to_mdp5_kms(to_mdp_kms(priv->kms));
+}
+
+static inline
+void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
+{
+	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
+	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+	(void)ctl->reg_offset;	/* TODO use this instead of mdp5_write */
+	mdp5_write(mdp5_kms, reg, data);
+}
+
+static inline
+u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
+{
+	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
+	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+	(void)ctl->reg_offset;	/* TODO use this instead of mdp5_write */
+	return mdp5_read(mdp5_kms, reg);
+}
+
+
+int mdp5_ctl_set_intf(void *c, enum mdp5_intf intf)
+{
+	struct mdp5_ctl *ctl = c;
+	unsigned long flags;
+	static const enum mdp5_intfnum intfnum[] = {
+			INTF0, INTF1, INTF2, INTF3,
+	};
+
+	spin_lock_irqsave(&ctl->hw_lock, flags);
+	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
+			MDP5_CTL_OP_MODE(ctl->mode) |
+			MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
+	spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+	return 0;
+}
+
+int mdp5_ctl_set_cursor(void *c, bool enable)
+{
+	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
+	struct mdp5_ctl *ctl = c;
+	unsigned long flags;
+	u32 blend_cfg;
+	int lm;
+
+	lm = mdp5_crtc_get_lm(ctl->crtc);
+	if (unlikely(WARN_ON(lm < 0))) {
+		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
+				ctl->id, lm);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ctl->hw_lock, flags);
+
+	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
+
+	if (enable)
+		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
+	else
+		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+
+	spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+	ctl->cursor_on = enable;
+
+	return 0;
+}
+
+
+int mdp5_ctl_blend(void *c, u32 lm, u32 blend_cfg)
+{
+	struct mdp5_ctl *ctl = c;
+	unsigned long flags;
+
+	if (ctl->cursor_on)
+		blend_cfg |=  MDP5_CTL_LAYER_REG_CURSOR_OUT;
+	else
+		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
+
+	spin_lock_irqsave(&ctl->hw_lock, flags);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+	spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+	return 0;
+}
+
+int mdp5_ctl_commit(void *c, u32 flush_mask)
+{
+	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
+	struct mdp5_ctl *ctl = c;
+	unsigned long flags;
+
+	if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
+		int lm = mdp5_crtc_get_lm(ctl->crtc);
+
+		if (unlikely(WARN_ON(lm < 0))) {
+			dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
+					ctl->id, lm);
+			return -EINVAL;
+		}
+
+		/* for current targets, cursor bit is the same as LM bit */
+		flush_mask |= mdp_ctl_flush_mask_lm(lm);
+	}
+
+	spin_lock_irqsave(&ctl->hw_lock, flags);
+	ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+	spin_unlock_irqrestore(&ctl->hw_lock, flags);
+
+	return 0;
+}
+
+u32 mdp5_ctl_get_flush(void *c)
+{
+	struct mdp5_ctl *ctl = c;
+
+	return ctl->flush_mask;
+}
+
+void mdp5_ctl_release(void *c)
+{
+	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
+	struct mdp5_ctl *ctl = c;
+	unsigned long flags;
+
+	if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
+		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
+				ctl->id, ctl->busy);
+		return;
+	}
+
+	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+	ctl->busy = false;
+	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+
+	DBG("CTL %d released", ctl->id);
+}
+
+/*
+ * mdp5_ctl_request() - CTL dynamic allocation
+ *
+ * Note: Current implementation considers that we can only have one CRTC per CTL
+ *
+ * @return first free CTL
+ */
+void *mdp5_ctl_request(void *ctlm, void *crtc)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctlm;
+	struct mdp5_ctl *ctl = NULL;
+	unsigned long flags;
+	int c;
+
+	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+
+	for (c = 0; c < ctl_mgr->nctl; c++)
+		if (!ctl_mgr->ctls[c].busy)
+			break;
+
+	if (unlikely(c >= ctl_mgr->nctl)) {
+		dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+		goto unlock;
+	}
+
+	ctl = &ctl_mgr->ctls[c];
+
+	ctl->crtc = crtc;
+	ctl->busy = true;
+	DBG("CTL %d allocated", ctl->id);
+
+unlock:
+	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+	return ctl;
+}
+
+void mdp5_ctlm_hw_reset(void *ctlm)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctlm;
+	unsigned long flags;
+	int c;
+
+	for (c = 0; c < ctl_mgr->nctl; c++) {
+		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+		spin_lock_irqsave(&ctl->hw_lock, flags);
+		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
+		spin_unlock_irqrestore(&ctl->hw_lock, flags);
+	}
+}
+
+void mdp5_ctlm_destroy(void *ctlm)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctlm;
+
+	kfree(ctl_mgr);
+}
+
+void *mdp5_ctlm_init(struct drm_device *dev, void __iomem *mmio_base,
+		const struct mdp5_cfg_hw *hw_cfg)
+{
+	struct mdp5_ctl_manager *ctl_mgr = &mdp5_ctl_mgr;
+	const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
+	unsigned long flags;
+	int c, ret;
+
+	if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
+		dev_err(dev->dev, "Increase static pool size to at least %d\n",
+				ctl_cfg->count);
+		ret = -ENOSPC;
+		goto fail;
+	}
+
+	/* initialize the CTL manager: */
+	ctl_mgr->dev = dev;
+	ctl_mgr->nlm = hw_cfg->lm.count;
+	ctl_mgr->nctl = ctl_cfg->count;
+	spin_lock_init(&ctl_mgr->pool_lock);
+
+	/* initialize each CTL of the pool: */
+	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
+	for (c = 0; c < ctl_mgr->nctl; c++) {
+		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
+
+		if (WARN_ON(!ctl_cfg->base[c])) {
+			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+			ret = -EINVAL;
+			goto fail;
+		}
+		ctl->id = c;
+		ctl->mode = MODE_NONE;
+		ctl->reg_offset = ctl_cfg->base[c];
+		ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
+		ctl->busy = false;
+		spin_lock_init(&ctl->hw_lock);
+	}
+	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
+	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
+
+	return ctl_mgr;
+
+fail:
+	if (ctl_mgr)
+		mdp5_ctlm_destroy(ctl_mgr);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
new file mode 100644
index 000000000000..dbe1cae71937
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDP5_CTL_H__
+#define __MDP5_CTL_H__
+
+#include "msm_drv.h"
+
+/*
+ * CTL Manager prototypes:
+ * mdp5_ctlm_init() returns a ctlm (CTL Manager) handler,
+ * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
+ */
+void *mdp5_ctlm_init(struct drm_device *dev, void __iomem *mmio_base,
+		const struct mdp5_cfg_hw *hw_cfg);
+void mdp5_ctlm_hw_reset(void *ctlm);
+void mdp5_ctlm_destroy(void *ctlm);
+
+/*
+ * CTL prototypes:
+ * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
+ * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
+ */
+void *mdp5_ctl_request(void *ctlm, void *crtc);
+
+int mdp5_ctl_set_intf(void *ctl, enum mdp5_intf intf);
+
+int mdp5_ctl_set_cursor(void *ctl, bool enable);
+
+/* @blend_cfg: see LM blender config definition below */
+int mdp5_ctl_blend(void *ctl, u32 lm, u32 blend_cfg);
+
+/* @flush_mask: see CTL flush masks definitions below */
+int mdp5_ctl_commit(void *ctl, u32 flush_mask);
+u32 mdp5_ctl_get_flush(void *ctl);
+
+void mdp5_ctl_release(void *ctl);
+
+/*
+ * blend_cfg (LM blender config):
+ *
+ * The function below allows the caller of mdp5_ctl_blend() to specify how pipes
+ * are being blended according to their stage (z-order), through @blend_cfg arg.
+ */
+static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
+		enum mdp_mixer_stage_id stage)
+{
+	switch (pipe) {
+	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
+	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
+	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
+	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
+	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
+	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
+	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
+	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
+	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
+	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
+	default:	return 0;
+	}
+}
+
+/*
+ * flush_mask (CTL flush masks):
+ *
+ * The following functions allow each DRM entity to get and store
+ * their own flush mask.
+ * Once stored, these masks will then be accessed through each DRM's
+ * interface and used by the caller of mdp5_ctl_commit() to specify
+ * which block(s) need to be flushed through @flush_mask parameter.
+ */
+
+#define MDP5_CTL_FLUSH_CURSOR_DUMMY	0x80000000
+
+static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
+{
+	/* TODO: use id once multiple cursor support is present */
+	(void)cursor_id;
+
+	return MDP5_CTL_FLUSH_CURSOR_DUMMY;
+}
+
+static inline u32 mdp_ctl_flush_mask_lm(int lm)
+{
+	switch (lm) {
+	case 0:  return MDP5_CTL_FLUSH_LM0;
+	case 1:  return MDP5_CTL_FLUSH_LM1;
+	case 2:  return MDP5_CTL_FLUSH_LM2;
+	case 5:  return MDP5_CTL_FLUSH_LM5;
+	default: return 0;
+	}
+}
+
+static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
+{
+	switch (pipe) {
+	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+	default:	return 0;
+	}
+}
+
+#endif /* __MDP5_CTL_H__ */
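How the masks compose at commit time: each plane contributes its pipe bit (mdp5_plane_get_flush()), the CRTC adds its mixer bit (mdp5_lm_get_flush()) and the CTL's own bit (mdp5_ctl_get_flush()), and the union goes to mdp5_ctl_commit() as a single CTL_FLUSH write. A sketch with placeholder bit positions — the driver's real values are generated into mdp5.xml.h:

#include <stdint.h>
#include <stdio.h>

/* placeholder flush bits, for illustration only */
#define FLUSH_VIG0	(1u << 0)
#define FLUSH_RGB0	(1u << 3)
#define FLUSH_LM0	(1u << 6)
#define FLUSH_CTL	(1u << 17)

int main(void)
{
	/* one private RGB pipe + one public VIG pipe on mixer 0 */
	uint32_t plane_masks[] = { FLUSH_RGB0, FLUSH_VIG0 };
	uint32_t flush_mask = 0;
	unsigned i;

	for (i = 0; i < sizeof(plane_masks) / sizeof(plane_masks[0]); i++)
		flush_mask |= plane_masks[i];	/* mdp5_plane_get_flush() */
	flush_mask |= FLUSH_CTL;		/* mdp5_ctl_get_flush() */
	flush_mask |= FLUSH_LM0;		/* mdp5_lm_get_flush() */

	printf("flush_mask = 0x%08x\n", flush_mask); /* one CTL_FLUSH write */
	return 0;
}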
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index edec7bfaa952..25c2fcb39ac3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -24,6 +24,7 @@ struct mdp5_encoder {
 	struct drm_encoder base;
 	int intf;
 	enum mdp5_intf intf_id;
+	spinlock_t intf_lock;	/* protect REG_MDP5_INTF_* registers */
 	bool enabled;
 	uint32_t bsc;
 };
@@ -115,6 +116,7 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
 	int intf = mdp5_encoder->intf;
 	bool enabled = (mode == DRM_MODE_DPMS_ON);
+	unsigned long flags;
 
 	DBG("mode=%d", mode);
 
@@ -123,9 +125,13 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
 
 	if (enabled) {
 		bs_set(mdp5_encoder, 1);
+		spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
 		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
+		spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
 	} else {
+		spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
 		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
+		spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
 		bs_set(mdp5_encoder, 0);
 	}
 
@@ -150,6 +156,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
 	uint32_t display_v_start, display_v_end;
 	uint32_t hsync_start_x, hsync_end_x;
 	uint32_t format;
+	unsigned long flags;
 
 	mode = adjusted_mode;
 
@@ -180,6 +187,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
 	display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
 	display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
 
+	spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
+
 	mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
 			MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
 			MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
@@ -201,6 +210,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
 	mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
 	mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
 	mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
+
+	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
 }
 
 static void mdp5_encoder_prepare(struct drm_encoder *encoder)
@@ -242,6 +253,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
 	mdp5_encoder->intf_id = intf_id;
 	encoder = &mdp5_encoder->base;
 
+	spin_lock_init(&mdp5_encoder->intf_lock);
+
 	drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
 			DRM_MODE_ENCODER_TMDS);
 	drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index adb45419b08d..da248c2b4fe8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -28,9 +28,8 @@ static const char *iommu_ports[] = {
 static int mdp5_hw_init(struct msm_kms *kms)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	const struct mdp5_cfg_hw *hw_cfg;
 	struct drm_device *dev = mdp5_kms->dev;
-	int i;
+	unsigned long flags;
 
 	pm_runtime_get_sync(dev->dev);
 
@@ -58,12 +57,11 @@ static int mdp5_hw_init(struct msm_kms *kms)
 	 * care.
 	 */
 
+	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
 	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
+	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
 
-	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg_priv);
-
-	for (i = 0; i < hw_cfg->ctl.count; i++)
-		mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
+	mdp5_ctlm_hw_reset(mdp5_kms->ctl_priv);
 
 	pm_runtime_put_sync(dev->dev);
 
@@ -92,6 +90,7 @@ static void mdp5_destroy(struct msm_kms *kms)
 	struct msm_mmu *mmu = mdp5_kms->mmu;
 	void *smp = mdp5_kms->smp_priv;
 	void *cfg = mdp5_kms->cfg_priv;
+	void *ctl = mdp5_kms->ctl_priv;
 
 	mdp5_irq_domain_fini(mdp5_kms);
 
@@ -99,7 +98,8 @@ static void mdp5_destroy(struct msm_kms *kms)
 		mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
 		mmu->funcs->destroy(mmu);
 	}
-
+	if (ctl)
+		mdp5_ctlm_destroy(ctl);
 	if (smp)
 		mdp5_smp_destroy(smp);
 	if (cfg)
@@ -154,6 +154,9 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 	static const enum mdp5_pipe crtcs[] = {
 			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
 	};
+	static const enum mdp5_pipe pub_planes[] = {
+			SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
+	};
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct drm_encoder *encoder;
@@ -169,12 +172,13 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 	if (ret)
 		goto fail;
 
-	/* construct CRTCs: */
+	/* construct CRTCs and their private planes: */
 	for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
 		struct drm_plane *plane;
 		struct drm_crtc *crtc;
 
-		plane = mdp5_plane_init(dev, crtcs[i], true);
+		plane = mdp5_plane_init(dev, crtcs[i], true,
+				hw_cfg->pipe_rgb.base[i]);
 		if (IS_ERR(plane)) {
 			ret = PTR_ERR(plane);
 			dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -192,6 +196,20 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
 		priv->crtcs[priv->num_crtcs++] = crtc;
 	}
 
+	/* Construct public planes: */
+	for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
+		struct drm_plane *plane;
+
+		plane = mdp5_plane_init(dev, pub_planes[i], false,
+				hw_cfg->pipe_vig.base[i]);
+		if (IS_ERR(plane)) {
+			ret = PTR_ERR(plane);
+			dev_err(dev->dev, "failed to construct %s plane: %d\n",
+					pipe2name(pub_planes[i]), ret);
+			goto fail;
+		}
+	}
+
 	/* Construct encoder for HDMI: */
 	encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
 	if (IS_ERR(encoder)) {
196 encoder = mdp5_encoder_init(dev, 3, INTF_HDMI); 214 encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
197 if (IS_ERR(encoder)) { 215 if (IS_ERR(encoder)) {
@@ -274,6 +292,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
+	spin_lock_init(&mdp5_kms->resource_lock);
+
 	mdp_kms_init(&mdp5_kms->base, &kms_funcs);
 
 	kms = &mdp5_kms->base.base;
@@ -348,6 +368,13 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	}
 	mdp5_kms->smp_priv = priv;
 
+	priv = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+	if (IS_ERR(priv)) {
+		ret = PTR_ERR(priv);
+		goto fail;
+	}
+	mdp5_kms->ctl_priv = priv;
+
 	/* make sure things are off before attaching iommu (bootloader could
 	 * have left things on, in which case we'll start getting faults if
 	 * we don't disable):
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index daca8da64666..77fd43ea912e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -23,6 +23,7 @@
 #include "mdp/mdp_kms.h"
 #include "mdp5_cfg.h"	/* must be included before mdp5.xml.h */
 #include "mdp5.xml.h"
+#include "mdp5_ctl.h"
 #include "mdp5_smp.h"
 
 struct mdp5_kms {
@@ -37,6 +38,7 @@ struct mdp5_kms {
 	struct msm_mmu *mmu;
 
 	void *smp_priv;
+	void *ctl_priv;
 
 	/* io/register spaces: */
 	void __iomem *mmio, *vbif;
@@ -50,6 +52,12 @@ struct mdp5_kms {
 	struct clk *lut_clk;
 	struct clk *vsync_clk;
 
+	/*
+	 * lock to protect access to global resources: ie., following register:
+	 *	- REG_MDP5_DISP_INTF_SEL
+	 */
+	spinlock_t resource_lock;
+
 	struct mdp_irq error_handler;
 
 	struct {
@@ -59,6 +67,10 @@ struct mdp5_kms {
 };
 #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
 
+struct mdp5_overlay_info {
+	enum mdp_mixer_stage_id zorder;
+};
+
 static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
 {
 	msm_writel(data, mdp5_kms->mmio + reg);
@@ -82,23 +94,6 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
 	return names[pipe];
 }
 
-static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
-{
-	switch (pipe) {
-	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
-	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
-	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
-	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
-	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
-	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
-	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
-	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
-	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
-	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
-	default:	return 0;
-	}
-}
-
 static inline int pipe2nclients(enum mdp5_pipe pipe)
 {
 	switch (pipe) {
@@ -112,16 +107,6 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
 	}
 }
 
-static inline uint32_t mixer2flush(int lm)
-{
-	switch (lm) {
-	case 0:  return MDP5_CTL_FLUSH_LM0;
-	case 1:  return MDP5_CTL_FLUSH_LM1;
-	case 2:  return MDP5_CTL_FLUSH_LM2;
-	default: return 0;
-	}
-}
-
 static inline uint32_t intf2err(int intf)
 {
 	switch (intf) {
@@ -169,6 +154,10 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
 
 void mdp5_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj);
+void mdp5_plane_set_overlay_info(struct drm_plane *plane,
+		const struct mdp5_overlay_info *overlay_info);
+struct mdp5_overlay_info *mdp5_plane_get_overlay_info(struct drm_plane *plane);
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 void mdp5_plane_set_scanout(struct drm_plane *plane,
 		struct drm_framebuffer *fb);
 int mdp5_plane_mode_set(struct drm_plane *plane,
@@ -180,14 +169,15 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 void mdp5_plane_complete_flip(struct drm_plane *plane);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-		enum mdp5_pipe pipe, bool private_plane);
+		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
 
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
+int mdp5_crtc_get_lm(struct drm_crtc *crtc);
 void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
 		enum mdp5_intf intf_id);
-void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
+int mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
 void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, int id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 633ca08bb014..59703faa9d13 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -17,6 +18,7 @@
 
 #include "mdp5_kms.h"
 
+#define MAX_PLANE	4
 
 struct mdp5_plane {
 	struct drm_plane base;
@@ -24,6 +26,13 @@ struct mdp5_plane {
 
 	enum mdp5_pipe pipe;
 
+	spinlock_t pipe_lock;	/* protect REG_MDP5_PIPE_* registers */
+	uint32_t reg_offset;
+
+	uint32_t flush_mask;	/* used to commit pipe registers */
+
+	struct mdp5_overlay_info overlay_info;
+
 	uint32_t nformats;
 	uint32_t formats[32];
 
@@ -95,6 +104,22 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
 	kfree(mdp5_plane);
 }
 
+void mdp5_plane_set_overlay_info(struct drm_plane *plane,
+		const struct mdp5_overlay_info *overlay_info)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+	memcpy(&mdp5_plane->overlay_info, overlay_info, sizeof(*overlay_info));
+}
+
+struct mdp5_overlay_info *mdp5_plane_get_overlay_info(
+		struct drm_plane *plane)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+	return &mdp5_plane->overlay_info;
+}
+
 /* helper to install properties which are common to planes and crtcs */
 void mdp5_plane_install_properties(struct drm_plane *plane,
 		struct drm_mode_object *obj)
@@ -116,35 +141,58 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
 	.set_property = mdp5_plane_set_property,
 };
 
-void mdp5_plane_set_scanout(struct drm_plane *plane,
-		struct drm_framebuffer *fb)
+static int get_fb_addr(struct drm_plane *plane, struct drm_framebuffer *fb,
+		uint32_t iova[MAX_PLANE])
 {
-	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
 	struct mdp5_kms *mdp5_kms = get_kms(plane);
-	enum mdp5_pipe pipe = mdp5_plane->pipe;
 	uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
-	uint32_t iova[4];
 	int i;
 
 	for (i = 0; i < nplanes; i++) {
 		struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
 		msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
 	}
-	for (; i < 4; i++)
+	for (; i < MAX_PLANE; i++)
 		iova[i] = 0;
 
+	return 0;
+}
+
+static void set_scanout_locked(struct drm_plane *plane,
+		uint32_t pitches[MAX_PLANE], uint32_t src_addr[MAX_PLANE])
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	struct mdp5_kms *mdp5_kms = get_kms(plane);
+	enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+	WARN_ON(!spin_is_locked(&mdp5_plane->pipe_lock));
+
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
-			MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
-			MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+			MDP5_PIPE_SRC_STRIDE_A_P0(pitches[0]) |
+			MDP5_PIPE_SRC_STRIDE_A_P1(pitches[1]));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
-			MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
-			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+			MDP5_PIPE_SRC_STRIDE_B_P2(pitches[2]) |
+			MDP5_PIPE_SRC_STRIDE_B_P3(pitches[3]));
+
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), src_addr[0]);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), src_addr[1]);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), src_addr[2]);
+	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), src_addr[3]);
+}
+
+void mdp5_plane_set_scanout(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+	uint32_t src_addr[MAX_PLANE];
+	unsigned long flags;
 
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
-	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
+	get_fb_addr(plane, fb, src_addr);
+
+	spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+	set_scanout_locked(plane, fb->pitches, src_addr);
+	spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
 
 	plane->fb = fb;
 }
@@ -163,6 +211,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 	uint32_t nplanes, config = 0;
 	uint32_t phasex_step = 0, phasey_step = 0;
 	uint32_t hdecm = 0, vdecm = 0;
+	uint32_t src_addr[MAX_PLANE];
+	unsigned long flags;
 	int ret;
 
 	nplanes = drm_format_num_planes(fb->pixel_format);
@@ -205,6 +255,12 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 		/* TODO calc phasey_step, vdecm */
 	}
 
+	ret = get_fb_addr(plane, fb, src_addr);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
+
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
 			MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
 			MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
@@ -225,8 +281,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 			MDP5_PIPE_OUT_XY_X(crtc_x) |
 			MDP5_PIPE_OUT_XY_Y(crtc_y));
 
-	mdp5_plane_set_scanout(plane, fb);
-
 	format = to_mdp_format(msm_framebuffer_format(fb));
 
 	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
@@ -266,10 +320,14 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
 			MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
 			MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
 
+	set_scanout_locked(plane, fb->pitches, src_addr);
+
+	spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
+
 	/* TODO detach from old crtc (if we had more than one) */
-	mdp5_crtc_attach(crtc, plane);
+	ret = mdp5_crtc_attach(crtc, plane);
 
-	return 0;
+	return ret;
 }
 
 void mdp5_plane_complete_flip(struct drm_plane *plane)
@@ -286,9 +344,16 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
 	return mdp5_plane->pipe;
 }
 
+uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
+{
+	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+
+	return mdp5_plane->flush_mask;
+}
+
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
-		enum mdp5_pipe pipe, bool private_plane)
+		enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
 {
 	struct drm_plane *plane = NULL;
 	struct mdp5_plane *mdp5_plane;
@@ -309,6 +374,10 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 	mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
 			ARRAY_SIZE(mdp5_plane->formats));
 
+	mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
+	mdp5_plane->reg_offset = reg_offset;
+	spin_lock_init(&mdp5_plane->pipe_lock);
+
 	type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
 	drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
 			mdp5_plane->formats, mdp5_plane->nformats,