author    Stephane Viau <sviau@codeaurora.org>  2015-03-13 15:49:33 -0400
committer Rob Clark <robdclark@gmail.com>       2015-04-01 19:29:34 -0400
commit    389b09a1822db2bf5050060acc63611ea6c4670d (patch)
tree      04e47e953e1695fe8a45955ff9d431a1a67be5cd /drivers/gpu/drm/msm
parent    d145dd78d7e72ac07c84f6919283569e6b45a5c3 (diff)
drm/msm/mdp5: Add START signal to kick off certain pipelines
Some interfaces (WB, DSI Command Mode) need to be kicked off through a
START signal. This signal must be sent at the right time and, in some
cases, requires tracking the pipeline status (e.g. whether pipeline
registers are flushed AND output WB buffers are ready, in the case of
the WB interface).

Signed-off-by: Stephane Viau <sviau@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
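In essence, each CTL tracks a start_mask of blocks that must be flushed
before the pipeline may be kicked off; once the encoder is enabled and the
mask drains to zero, the START register is written. The gate also depends
on the interface type, since video-mode pipelines are free-running. A
standalone C condensation of the gate (simplified types and names, not the
driver code itself):

    #include <stdbool.h>
    #include <stdint.h>

    enum intf_type { INTF_VIDEO, INTF_WB, INTF_DSI_CMD };

    struct pipeline {
        enum intf_type type;
        bool encoder_enabled;   /* encoder ready for data streaming */
        uint32_t start_mask;    /* blocks still to flush before kickoff */
    };

    /* Only WB and DSI command mode are kicked off via START; video-mode
     * interfaces stream continuously once their timing engine is on. */
    static bool start_signal_needed(const struct pipeline *p)
    {
        if (!p->encoder_enabled || p->start_mask != 0)
            return false;
        return p->type == INTF_WB || p->type == INTF_DSI_CMD;
    }

    int main(void)
    {
        struct pipeline p = { INTF_WB, true, 0 };
        return start_signal_needed(&p) ? 0 : 1;  /* gate open: send START */
    }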
Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c      |   2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h      |   7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c     |  31
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c      | 247
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h      |  72
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c  |  13
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h      |   1
7 files changed, 276 insertions(+), 97 deletions(-)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index b0a44310cf2a..1eacea72c5eb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -31,6 +31,7 @@ const struct mdp5_cfg_hw msm8x74_config = {
 	.ctl = {
 		.count = 5,
 		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+		.flush_hw_mask = 0x0003ffff,
 	},
 	.pipe_vig = {
 		.count = 3,
@@ -78,6 +79,7 @@ const struct mdp5_cfg_hw apq8084_config = {
 	.ctl = {
 		.count = 5,
 		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+		.flush_hw_mask = 0x003fffff,
 	},
 	.pipe_vig = {
 		.count = 4,
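The flush_hw_mask values record which FLUSH register bits physically exist
on each SoC (bits 0..17 on msm8x74, bits 0..21 on apq8084). Bits outside
the mask must not be written; a standalone sketch of the filtering, using
the cursor bit defined later in this patch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint32_t flush_hw_mask = 0x0003ffff; /* msm8x74: bits 0..17 */
        const uint32_t cursor0_bit   = 0x00400000; /* MDP5_CTL_FLUSH_CURSOR_0, bit 22 */

        /* A cursor flush request on msm8x74 filters down to nothing; that
         * is why mdp5_ctl_commit() runs fix_sw_flush() first, mapping the
         * missing cursor bit onto the LM bit the hardware shares with it. */
        uint32_t written = cursor0_bit & flush_hw_mask;
        assert(written == 0);
        return 0;
    }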
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index 4e90740c9749..69e35aca80a7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -44,6 +44,11 @@ struct mdp5_lm_block {
 	uint32_t nb_stages;	/* number of stages per blender */
 };
 
+struct mdp5_ctl_block {
+	MDP5_SUB_BLOCK_DEFINITION;
+	uint32_t flush_hw_mask;	/* FLUSH register's hardware mask */
+};
+
 struct mdp5_smp_block {
 	int mmb_count;		/* number of SMP MMBs */
 	int mmb_size;		/* MMB: size in bytes */
@@ -55,7 +60,7 @@ struct mdp5_cfg_hw {
 	char *name;
 
 	struct mdp5_smp_block smp;
-	struct mdp5_sub_block ctl;
+	struct mdp5_ctl_block ctl;
 	struct mdp5_sub_block pipe_vig;
 	struct mdp5_sub_block pipe_rgb;
 	struct mdp5_sub_block pipe_dma;
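struct mdp5_ctl_block reuses the MDP5_SUB_BLOCK_DEFINITION prefix (count
plus base offsets) and tacks the CTL-specific field on the end. The pattern,
reconstructed as a standalone sketch (the real macro lives in mdp5_cfg.h
and may differ in detail):

    #include <stdint.h>

    #define MAX_BASES 8
    #define SUB_BLOCK_DEFINITION \
        int count;               /* number of instances of this block */ \
        uint32_t base[MAX_BASES] /* register base offset per instance */

    struct sub_block {
        SUB_BLOCK_DEFINITION;
    };

    /* Same leading fields as struct sub_block, so generic code can treat
     * both alike, plus the field only CTL blocks need: */
    struct ctl_block {
        SUB_BLOCK_DEFINITION;
        uint32_t flush_hw_mask;  /* FLUSH register's hardware mask */
    };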
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 15136f17f8ce..9527ad112446 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -82,8 +82,6 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
 	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
 }
 
-#define mdp5_lm_get_flush(lm)	mdp_ctl_flush_mask_lm(lm)
-
 static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -110,8 +108,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
 		flush_mask |= mdp5_plane_get_flush(plane);
 	}
-	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
-	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
+
+	flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
 
 	crtc_flush(crtc, flush_mask);
 }
@@ -442,13 +440,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
-	struct drm_gem_object *cursor_bo, *old_bo;
+	struct drm_gem_object *cursor_bo, *old_bo = NULL;
 	uint32_t blendcfg, cursor_addr, stride;
 	int ret, bpp, lm;
 	unsigned int depth;
 	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
 	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
 	uint32_t roi_w, roi_h;
+	bool cursor_enable = true;
 	unsigned long flags;
 
 	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -461,7 +460,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 
 	if (!handle) {
 		DBG("Cursor off");
-		return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false);
+		cursor_enable = false;
+		goto set_cursor;
 	}
 
 	cursor_bo = drm_gem_object_lookup(dev, file, handle);
@@ -502,11 +502,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 
 	spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
 
-	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
-	if (ret)
+set_cursor:
+	ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
+	if (ret) {
+		dev_err(dev->dev, "failed to %sable cursor: %d\n",
+				cursor_enable ? "en" : "dis", ret);
 		goto end;
+	}
 
-	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
 	crtc_flush(crtc, flush_mask);
 
 end:
@@ -628,11 +631,13 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
 int mdp5_crtc_get_lm(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
+}
 
-	if (WARN_ON(!crtc))
-		return -EINVAL;
-
-	return mdp5_crtc->lm;
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
+{
+	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+	return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
 }
 
 /* initialize crtc */
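The cursor rework above replaces an early return with a goto into a shared
tail, so enable and disable funnel through one mdp5_ctl_set_cursor() call
and one flush. The shape of that control flow, as a standalone sketch
(hypothetical helpers, not the driver code):

    #include <stdbool.h>
    #include <stdio.h>

    static int set_cursor(int cursor_id, bool enable)
    {
        printf("cursor %d -> %sable\n", cursor_id, enable ? "en" : "dis");
        return 0;
    }

    static int cursor_update(unsigned int handle)
    {
        bool cursor_enable = true;
        int ret;

        if (!handle) {
            cursor_enable = false;
            goto set_cursor;   /* skip buffer setup, share the tail below */
        }

        /* ... look up and program the cursor buffer here ... */

    set_cursor:
        ret = set_cursor(0, cursor_enable);
        if (ret) {
            fprintf(stderr, "failed to %sable cursor: %d\n",
                    cursor_enable ? "en" : "dis", ret);
            return ret;
        }
        puts("flush");         /* both paths end with a single flush */
        return 0;
    }

    int main(void)
    {
        return cursor_update(0);  /* handle == 0 means "cursor off" */
    }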
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index a548113a8f68..7c0adf54e3e5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -35,18 +35,16 @@
 
 struct op_mode {
 	struct mdp5_interface intf;
-	/*
-	 * TODO: add a state variable to control the pipeline
-	 *
-	 * eg: WB interface needs both buffer addresses to be committed +
-	 * output buffers ready to be written into, before we can START.
-	 */
+
+	bool encoder_enabled;
+	uint32_t start_mask;
 };
 
 struct mdp5_ctl {
 	struct mdp5_ctl_manager *ctlm;
 
 	u32 id;
+	int lm;
 
 	/* whether this CTL has been allocated or not: */
 	bool busy;
@@ -58,8 +56,8 @@ struct mdp5_ctl {
 	spinlock_t hw_lock;
 	u32 reg_offset;
 
-	/* flush mask used to commit CTL registers */
-	u32 flush_mask;
+	/* when do CTL registers need to be flushed? (mask of trigger bits) */
+	u32 pending_ctl_trigger;
 
 	bool cursor_on;
 
@@ -73,6 +71,9 @@ struct mdp5_ctl_manager {
 	u32 nlm;
 	u32 nctl;
 
+	/* to filter out non-present bits in the current hardware config */
+	u32 flush_hw_mask;
+
 	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
 	spinlock_t pool_lock;
 	struct mdp5_ctl ctls[MAX_CTL];
@@ -174,6 +175,9 @@ int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
 
 	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
 
+	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
+				   mdp_ctl_flush_mask_encoder(intf);
+
 	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
 	if (!mdp5_cfg_intf_is_virtual(intf->type))
 		set_display_intf(mdp5_kms, intf);
@@ -183,14 +187,90 @@ int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
 	return 0;
 }
 
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
+static bool start_signal_needed(struct mdp5_ctl *ctl)
+{
+	struct op_mode *pipeline = &ctl->pipeline;
+
+	if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
+		return false;
+
+	switch (pipeline->intf.type) {
+	case INTF_WB:
+		return true;
+	case INTF_DSI:
+		return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
+	default:
+		return false;
+	}
+}
+
+/*
+ * send_start_signal() - Overlay Processor Start Signal
+ *
+ * For a given control operation (display pipeline), a START signal needs to be
+ * executed in order to kick off operation and activate all layers.
+ * e.g.: DSI command mode, Writeback
+ */
+static void send_start_signal(struct mdp5_ctl *ctl)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctl->hw_lock, flags);
+	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
+	spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+static void refill_start_mask(struct mdp5_ctl *ctl)
+{
+	struct op_mode *pipeline = &ctl->pipeline;
+	struct mdp5_interface *intf = &ctl->pipeline.intf;
+
+	pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
+
+	/*
+	 * Writeback encoder needs to program & flush
+	 * address registers for each page flip..
+	 */
+	if (intf->type == INTF_WB)
+		pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
+}
+
+/**
+ * mdp5_ctl_set_encoder_state() - set the encoder state
+ *
+ * @enable: true, when encoder is ready for data streaming; false, otherwise.
+ *
+ * Note:
+ * This encoder state is needed to trigger START signal (data path kickoff).
+ */
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
+{
+	if (WARN_ON(!ctl))
+		return -EINVAL;
+
+	ctl->pipeline.encoder_enabled = enabled;
+	DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");
+
+	if (start_signal_needed(ctl)) {
+		send_start_signal(ctl);
+		refill_start_mask(ctl);
+	}
+
+	return 0;
+}
+
+/*
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
+ */
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	unsigned long flags;
 	u32 blend_cfg;
-	int lm;
+	int lm = ctl->lm;
 
-	lm = mdp5_crtc_get_lm(ctl->crtc);
 	if (unlikely(WARN_ON(lm < 0))) {
 		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
 				ctl->id, lm);
@@ -210,12 +290,12 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
 
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
+	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
 	ctl->cursor_on = enable;
 
 	return 0;
 }
 
-
 int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
 {
 	unsigned long flags;
@@ -229,37 +309,133 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
 	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
+	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+
 	return 0;
 }
 
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
+{
+	/* these are dummy bits for now, but will appear in next chipsets: */
+#define MDP5_CTL_FLUSH_TIMING_0 0x80000000
+#define MDP5_CTL_FLUSH_TIMING_1 0x40000000
+#define MDP5_CTL_FLUSH_TIMING_2 0x20000000
+#define MDP5_CTL_FLUSH_TIMING_3 0x10000000
+#define MDP5_CTL_FLUSH_WB       0x00010000
+
+	if (intf->type == INTF_WB)
+		return MDP5_CTL_FLUSH_WB;
+
+	switch (intf->num) {
+	case 0: return MDP5_CTL_FLUSH_TIMING_0;
+	case 1: return MDP5_CTL_FLUSH_TIMING_1;
+	case 2: return MDP5_CTL_FLUSH_TIMING_2;
+	case 3: return MDP5_CTL_FLUSH_TIMING_3;
+	default: return 0;
+	}
+}
+
+u32 mdp_ctl_flush_mask_cursor(int cursor_id)
+{
+	/* these are dummy bits for now, but will appear in next chipsets: */
+#define MDP5_CTL_FLUSH_CURSOR_0 0x00400000
+#define MDP5_CTL_FLUSH_CURSOR_1 0x00800000
+
+	switch (cursor_id) {
+	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
+	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
+	default: return 0;
+	}
+}
+
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
+{
+	switch (pipe) {
+	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
+	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
+	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
+	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
+	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
+	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
+	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
+	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
+	default: return 0;
+	}
+}
+
+u32 mdp_ctl_flush_mask_lm(int lm)
+{
+	switch (lm) {
+	case 0: return MDP5_CTL_FLUSH_LM0;
+	case 1: return MDP5_CTL_FLUSH_LM1;
+	case 2: return MDP5_CTL_FLUSH_LM2;
+	case 5: return MDP5_CTL_FLUSH_LM5;
+	default: return 0;
+	}
+}
+
+static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+	u32 sw_mask = 0;
+#define BIT_NEEDS_SW_FIX(bit) \
+	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))
+
+	/* for some targets, cursor bit is the same as LM bit */
+	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
+		sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
+
+	return sw_mask;
+}
+
+/**
+ * mdp5_ctl_commit() - Register Flush
+ *
+ * The flush register is used to indicate several registers are all
+ * programmed, and are safe to update to the back copy of the double
+ * buffered registers.
+ *
+ * Some registers FLUSH bits are shared when the hardware does not have
+ * dedicated bits for them; handling these is the job of fix_sw_flush().
+ *
+ * CTL registers need to be flushed in some circumstances; if that is the
+ * case, some trigger bits will be present in both flush mask and
+ * ctl->pending_ctl_trigger.
+ */
 int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+	struct op_mode *pipeline = &ctl->pipeline;
 	unsigned long flags;
 
-	if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
-		int lm = mdp5_crtc_get_lm(ctl->crtc);
+	pipeline->start_mask &= ~flush_mask;
 
-		if (unlikely(WARN_ON(lm < 0))) {
-			dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
-					ctl->id, lm);
-			return -EINVAL;
-		}
+	VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
+			pipeline->start_mask, ctl->pending_ctl_trigger);
 
-		/* for current targets, cursor bit is the same as LM bit */
-		flush_mask |= mdp_ctl_flush_mask_lm(lm);
+	if (ctl->pending_ctl_trigger & flush_mask) {
+		flush_mask |= MDP5_CTL_FLUSH_CTL;
+		ctl->pending_ctl_trigger = 0;
 	}
 
-	spin_lock_irqsave(&ctl->hw_lock, flags);
-	ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
-	spin_unlock_irqrestore(&ctl->hw_lock, flags);
+	flush_mask |= fix_sw_flush(ctl, flush_mask);
 
-	return 0;
-}
+	flush_mask &= ctl_mgr->flush_hw_mask;
 
-u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
-{
-	return ctl->flush_mask;
+	if (flush_mask) {
+		spin_lock_irqsave(&ctl->hw_lock, flags);
+		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+		spin_unlock_irqrestore(&ctl->hw_lock, flags);
+	}
+
+	if (start_signal_needed(ctl)) {
+		send_start_signal(ctl);
+		refill_start_mask(ctl);
+	}
+
+	return 0;
 }
 
 void mdp5_ctl_release(struct mdp5_ctl *ctl)
@@ -280,6 +456,11 @@ void mdp5_ctl_release(struct mdp5_ctl *ctl)
 	DBG("CTL %d released", ctl->id);
 }
 
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
+{
+	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
+}
+
 /*
  * mdp5_ctl_request() - CTL dynamic allocation
  *
@@ -307,8 +488,10 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 
 	ctl = &ctl_mgr->ctls[c];
 
+	ctl->lm = mdp5_crtc_get_lm(crtc);
 	ctl->crtc = crtc;
 	ctl->busy = true;
+	ctl->pending_ctl_trigger = 0;
 	DBG("CTL %d allocated", ctl->id);
 
 unlock:
@@ -339,7 +522,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
 {
 	struct mdp5_ctl_manager *ctl_mgr;
-	const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
+	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
 	unsigned long flags;
 	int c, ret;
 
@@ -361,6 +544,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 	ctl_mgr->dev = dev;
 	ctl_mgr->nlm = hw_cfg->lm.count;
 	ctl_mgr->nctl = ctl_cfg->count;
+	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
 	spin_lock_init(&ctl_mgr->pool_lock);
 
 	/* initialize each CTL of the pool: */
@@ -376,7 +560,6 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 		ctl->ctlm = ctl_mgr;
 		ctl->id = c;
 		ctl->reg_offset = ctl_cfg->base[c];
-		ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
 		ctl->busy = false;
 		spin_lock_init(&ctl->hw_lock);
 	}
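mdp5_ctl_commit() now also self-flushes the CTL registers: whenever a block
whose CTL-side configuration was reprogrammed (recorded in
pending_ctl_trigger by mdp5_ctl_blend() and mdp5_ctl_set_cursor()) shows up
in the requested flush mask, the CTL flush bit is OR'd in. A standalone
model of that step (the FLUSH_CTL bit value here is illustrative):

    #include <assert.h>
    #include <stdint.h>

    #define FLUSH_CTL (1u << 17)  /* CTL's own flush bit; value illustrative */

    /* Mirrors the pending-trigger logic of mdp5_ctl_commit(): flush the CTL
     * registers along with any block they were reprogrammed for. */
    static uint32_t apply_ctl_trigger(uint32_t flush_mask, uint32_t *pending)
    {
        if (*pending & flush_mask) {
            flush_mask |= FLUSH_CTL;
            *pending = 0;
        }
        return flush_mask;
    }

    int main(void)
    {
        uint32_t pending = 1u << 6;  /* say, an LM blend config was changed */
        uint32_t mask = apply_ctl_trigger(1u << 6, &pending);
        assert(mask & FLUSH_CTL);    /* CTL registers ride along */
        assert(pending == 0);        /* trigger consumed */
        return 0;
    }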
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 271d5ac429be..7a62000994a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -33,20 +33,13 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
  * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
  */
 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
 
 struct mdp5_interface;
 int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
 
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
-
-/* @blend_cfg: see LM blender config definition below */
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
-
-/* @flush_mask: see CTL flush masks definitions below */
-int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
-u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
-
-void mdp5_ctl_release(struct mdp5_ctl *ctl);
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
 
 /*
  * blend_cfg (LM blender config):
@@ -73,51 +66,32 @@ static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
 }
 
 /*
- * flush_mask (CTL flush masks):
+ * mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
+ *
+ * @blend_cfg: see LM blender config definition below
  *
- * The following functions allow each DRM entity to get and store
- * their own flush mask.
- * Once stored, these masks will then be accessed through each DRM's
- * interface and used by the caller of mdp5_ctl_commit() to specify
- * which block(s) need to be flushed through @flush_mask parameter.
+ * Note:
+ * CTL registers need to be flushed after calling this function
+ * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
  */
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
 
-#define MDP5_CTL_FLUSH_CURSOR_DUMMY	0x80000000
+/**
+ * mdp_ctl_flush_mask...() - Register FLUSH masks
+ *
+ * These masks are used to specify which block(s) need to be flushed
+ * through @flush_mask parameter in mdp5_ctl_commit(.., flush_mask).
+ */
+u32 mdp_ctl_flush_mask_lm(int lm);
+u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe);
+u32 mdp_ctl_flush_mask_cursor(int cursor_id);
+u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
 
-static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
-{
-	/* TODO: use id once multiple cursor support is present */
-	(void)cursor_id;
+/* @flush_mask: see CTL flush masks definitions below */
+int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
 
-	return MDP5_CTL_FLUSH_CURSOR_DUMMY;
-}
+void mdp5_ctl_release(struct mdp5_ctl *ctl);
 
-static inline u32 mdp_ctl_flush_mask_lm(int lm)
-{
-	switch (lm) {
-	case 0: return MDP5_CTL_FLUSH_LM0;
-	case 1: return MDP5_CTL_FLUSH_LM1;
-	case 2: return MDP5_CTL_FLUSH_LM2;
-	case 5: return MDP5_CTL_FLUSH_LM5;
-	default: return 0;
-	}
-}
 
-static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
-{
-	switch (pipe) {
-	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
-	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
-	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
-	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
-	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
-	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
-	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
-	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
-	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
-	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
-	default: return 0;
-	}
-}
 
 #endif /* __MDP5_CTL_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index b18b381502a3..2ef6d1b0a218 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -217,12 +217,15 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
 	mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
 
 	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+
+	mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
 }
 
 static void mdp5_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
+	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
 	int lm = mdp5_crtc_get_lm(encoder->crtc);
 	struct mdp5_interface *intf = &mdp5_encoder->intf;
 	int intfn = mdp5_encoder->intf.num;
@@ -231,9 +234,12 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
 	if (WARN_ON(!mdp5_encoder->enabled))
 		return;
 
+	mdp5_ctl_set_encoder_state(ctl, false);
+
 	spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
 	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
 	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
 
 	/*
 	 * Wait for a vsync so we know the ENABLE=0 latched before
@@ -254,18 +260,21 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
+	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_interface *intf = &mdp5_encoder->intf;
 	int intfn = mdp5_encoder->intf.num;
 	unsigned long flags;
 
 	if (WARN_ON(mdp5_encoder->enabled))
 		return;
 
-	mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
-
 	bs_set(mdp5_encoder, 1);
 	spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
 	mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
 	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
+	mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+
+	mdp5_ctl_set_encoder_state(ctl, true);
 
 	mdp5_encoder->enabled = true;
 }
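Note the ordering on both paths: enable flushes the encoder block and only
then marks the encoder ready, so START cannot fire before the flush has
been committed, while disable withdraws the encoder state before stopping
the timing engine. A standalone model of the enable sequence (simplified
types, not the driver code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ctl { bool encoder_enabled; uint32_t start_mask; };

    static void try_start(struct ctl *c)
    {
        if (c->encoder_enabled && c->start_mask == 0)
            puts("write CTL_START");
    }

    static void ctl_commit(struct ctl *c, uint32_t flush_mask)
    {
        c->start_mask &= ~flush_mask;  /* flushed blocks no longer gate START */
        try_start(c);
    }

    static void set_encoder_state(struct ctl *c, bool on)
    {
        c->encoder_enabled = on;
        try_start(c);
    }

    int main(void)
    {
        /* start_mask = LM bit | encoder bit, as refilled by the CTL code */
        struct ctl c = { .encoder_enabled = false, .start_mask = 0x3 };

        ctl_commit(&c, 0x3);          /* flush first: gate still closed */
        set_encoder_state(&c, true);  /* encoder ready: START fires here */
        return 0;
    }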
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 1a6aa494d92f..690edfde4ba1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -230,6 +230,7 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc);
+struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
 void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,