aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author Rob Clark <robdclark@gmail.com> 2018-02-19 08:17:06 -0500
committer Rob Clark <robdclark@gmail.com> 2018-03-19 06:33:37 -0400
commit f9cb8d8d836e155f361c3f1bbe0802ae1f98a17e (patch)
tree 21ebe84591bc219d7a362b4347bab42b228ab709
parent 79d57bf6fa3bcc0ec5fc3b8140c4df1d696f593b (diff)
drm/msm/mdp5: rework CTL START signal handling
For DSI cmd-mode and writeback, we need to write the CTL's START register to kick things off, but we only want to do that once both the encoder and the crtc have had a chance to write their corresponding flush bits. The difficulty is that when there is a full modeset (i.e. encoder state has changed) we want to defer the start until encoder->enable(). But if only planes have changed, we want to do this from crtc->commit(). The start_mask was a previous attempt to handle this, but it didn't really do the right thing since the atomic conversion. Instead, track in the crtc state that the start should be deferred, set to true from the encoder's (or in future writeback's) atomic_check(). This way the state is part of the atomic state, and rollback can work properly if an atomic test fails. Signed-off-by: Rob Clark <robdclark@gmail.com>
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c52
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c5
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h8
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c2
7 files changed, 35 insertions, 44 deletions
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index 1abc7f5c345c..d6f79dc755b4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -159,7 +159,7 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
159 pingpong_tearcheck_disable(encoder); 159 pingpong_tearcheck_disable(encoder);
160 160
161 mdp5_ctl_set_encoder_state(ctl, pipeline, false); 161 mdp5_ctl_set_encoder_state(ctl, pipeline, false);
162 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); 162 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
163 163
164 bs_set(mdp5_cmd_enc, 0); 164 bs_set(mdp5_cmd_enc, 0);
165 165
@@ -180,7 +180,7 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
180 if (pingpong_tearcheck_enable(encoder)) 180 if (pingpong_tearcheck_enable(encoder))
181 return; 181 return;
182 182
183 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); 183 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
184 184
185 mdp5_ctl_set_encoder_state(ctl, pipeline, true); 185 mdp5_ctl_set_encoder_state(ctl, pipeline, true);
186 186
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 8c5ed0b59e46..91c829a2cc85 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -97,9 +97,13 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
97 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); 97 struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
98 struct mdp5_ctl *ctl = mdp5_cstate->ctl; 98 struct mdp5_ctl *ctl = mdp5_cstate->ctl;
99 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline; 99 struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
100 bool start = !mdp5_cstate->defer_start;
101
102 mdp5_cstate->defer_start = false;
100 103
101 DBG("%s: flush=%08x", crtc->name, flush_mask); 104 DBG("%s: flush=%08x", crtc->name, flush_mask);
102 return mdp5_ctl_commit(ctl, pipeline, flush_mask); 105
106 return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
103} 107}
104 108
105/* 109/*
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 439e0a300e25..1197f060c5c6 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -41,7 +41,9 @@ struct mdp5_ctl {
41 u32 status; 41 u32 status;
42 42
43 bool encoder_enabled; 43 bool encoder_enabled;
44 uint32_t start_mask; 44
45 /* pending flush_mask bits */
46 u32 flush_mask;
45 47
46 /* REG_MDP5_CTL_*(<id>) registers access info + lock: */ 48 /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
47 spinlock_t hw_lock; 49 spinlock_t hw_lock;
@@ -173,16 +175,8 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
173 175
174int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) 176int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
175{ 177{
176 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 178 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
177 struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
178 struct mdp5_interface *intf = pipeline->intf; 179 struct mdp5_interface *intf = pipeline->intf;
179 struct mdp5_hw_mixer *mixer = pipeline->mixer;
180 struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
181
182 ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
183 mdp_ctl_flush_mask_encoder(intf);
184 if (r_mixer)
185 ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
186 180
187 /* Virtual interfaces need not set a display intf (e.g.: Writeback) */ 181 /* Virtual interfaces need not set a display intf (e.g.: Writeback) */
188 if (!mdp5_cfg_intf_is_virtual(intf->type)) 182 if (!mdp5_cfg_intf_is_virtual(intf->type))
@@ -198,7 +192,7 @@ static bool start_signal_needed(struct mdp5_ctl *ctl,
198{ 192{
199 struct mdp5_interface *intf = pipeline->intf; 193 struct mdp5_interface *intf = pipeline->intf;
200 194
201 if (!ctl->encoder_enabled || ctl->start_mask != 0) 195 if (!ctl->encoder_enabled)
202 return false; 196 return false;
203 197
204 switch (intf->type) { 198 switch (intf->type) {
@@ -227,25 +221,6 @@ static void send_start_signal(struct mdp5_ctl *ctl)
227 spin_unlock_irqrestore(&ctl->hw_lock, flags); 221 spin_unlock_irqrestore(&ctl->hw_lock, flags);
228} 222}
229 223
230static void refill_start_mask(struct mdp5_ctl *ctl,
231 struct mdp5_pipeline *pipeline)
232{
233 struct mdp5_interface *intf = pipeline->intf;
234 struct mdp5_hw_mixer *mixer = pipeline->mixer;
235 struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
236
237 ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);
238 if (r_mixer)
239 ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
240
241 /*
242 * Writeback encoder needs to program & flush
243 * address registers for each page flip..
244 */
245 if (intf->type == INTF_WB)
246 ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf);
247}
248
249/** 224/**
250 * mdp5_ctl_set_encoder_state() - set the encoder state 225 * mdp5_ctl_set_encoder_state() - set the encoder state
251 * 226 *
@@ -268,7 +243,6 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
268 243
269 if (start_signal_needed(ctl, pipeline)) { 244 if (start_signal_needed(ctl, pipeline)) {
270 send_start_signal(ctl); 245 send_start_signal(ctl);
271 refill_start_mask(ctl, pipeline);
272 } 246 }
273 247
274 return 0; 248 return 0;
@@ -557,17 +531,14 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
557 */ 531 */
558u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, 532u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
559 struct mdp5_pipeline *pipeline, 533 struct mdp5_pipeline *pipeline,
560 u32 flush_mask) 534 u32 flush_mask, bool start)
561{ 535{
562 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 536 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
563 unsigned long flags; 537 unsigned long flags;
564 u32 flush_id = ctl->id; 538 u32 flush_id = ctl->id;
565 u32 curr_ctl_flush_mask; 539 u32 curr_ctl_flush_mask;
566 540
567 ctl->start_mask &= ~flush_mask; 541 VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);
568
569 VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
570 ctl->start_mask, ctl->pending_ctl_trigger);
571 542
572 if (ctl->pending_ctl_trigger & flush_mask) { 543 if (ctl->pending_ctl_trigger & flush_mask) {
573 flush_mask |= MDP5_CTL_FLUSH_CTL; 544 flush_mask |= MDP5_CTL_FLUSH_CTL;
@@ -582,6 +553,14 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
582 553
583 fix_for_single_flush(ctl, &flush_mask, &flush_id); 554 fix_for_single_flush(ctl, &flush_mask, &flush_id);
584 555
556 if (!start) {
557 ctl->flush_mask |= flush_mask;
558 return curr_ctl_flush_mask;
559 } else {
560 flush_mask |= ctl->flush_mask;
561 ctl->flush_mask = 0;
562 }
563
585 if (flush_mask) { 564 if (flush_mask) {
586 spin_lock_irqsave(&ctl->hw_lock, flags); 565 spin_lock_irqsave(&ctl->hw_lock, flags);
587 ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask); 566 ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
@@ -590,7 +569,6 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
590 569
591 if (start_signal_needed(ctl, pipeline)) { 570 if (start_signal_needed(ctl, pipeline)) {
592 send_start_signal(ctl); 571 send_start_signal(ctl);
593 refill_start_mask(ctl, pipeline);
594 } 572 }
595 573
596 return curr_ctl_flush_mask; 574 return curr_ctl_flush_mask;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
index b63120388dc6..403b0db0fa4c 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
@@ -78,7 +78,7 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
78 78
79/* @flush_mask: see CTL flush masks definitions below */ 79/* @flush_mask: see CTL flush masks definitions below */
80u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, 80u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
81 u32 flush_mask); 81 u32 flush_mask, bool start);
82u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl); 82u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
83 83
84 84
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index 36ad3cbe5f79..9af94e35f678 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -228,7 +228,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
228 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); 228 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
229 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0); 229 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
230 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); 230 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
231 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); 231 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
232 232
233 /* 233 /*
234 * Wait for a vsync so we know the ENABLE=0 latched before 234 * Wait for a vsync so we know the ENABLE=0 latched before
@@ -262,7 +262,7 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
262 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags); 262 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
263 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1); 263 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
264 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags); 264 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
265 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf)); 265 mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
266 266
267 mdp5_ctl_set_encoder_state(ctl, pipeline, true); 267 mdp5_ctl_set_encoder_state(ctl, pipeline, true);
268 268
@@ -319,6 +319,7 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
319 319
320 mdp5_cstate->ctl = ctl; 320 mdp5_cstate->ctl = ctl;
321 mdp5_cstate->pipeline.intf = intf; 321 mdp5_cstate->pipeline.intf = intf;
322 mdp5_cstate->defer_start = true;
322 323
323 return 0; 324 return 0;
324} 325}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index aeb94aa461b5..425a03d213e5 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -133,6 +133,14 @@ struct mdp5_crtc_state {
133 u32 pp_done_irqmask; 133 u32 pp_done_irqmask;
134 134
135 bool cmd_mode; 135 bool cmd_mode;
136
137 /* should we not write CTL[n].START register on flush? If the
138 * encoder has changed this is set to true, since encoder->enable()
139 * is called after crtc state is committed, but we only want to
140 * write the CTL[n].START register once. This lets us defer
141 * writing CTL[n].START until encoder->enable()
142 */
143 bool defer_start;
136}; 144};
137#define to_mdp5_crtc_state(x) \ 145#define to_mdp5_crtc_state(x) \
138 container_of(x, struct mdp5_crtc_state, base) 146 container_of(x, struct mdp5_crtc_state, base)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 98d4d7331767..5dc42d89b588 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -545,7 +545,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
545 545
546 ctl = mdp5_crtc_get_ctl(new_state->crtc); 546 ctl = mdp5_crtc_get_ctl(new_state->crtc);
547 547
548 mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane)); 548 mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
549 } 549 }
550 550
551 *to_mdp5_plane_state(plane->state) = 551 *to_mdp5_plane_state(plane->state) =