aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRob Clark <robdclark@chromium.org>2019-08-29 12:45:16 -0400
committerRob Clark <robdclark@chromium.org>2019-09-03 19:17:01 -0400
commit2d99ced787e3d0f251fa370d2aae83cf2085a8d9 (patch)
tree5af6cb9e1fd834af0b2fb75630e7a25e38a6510e
parente35a29d5c432504e6134c881dc4aaded7de7e717 (diff)
drm/msm: async commit support
Now that flush/wait/complete is decoupled from the "synchronous" part of atomic commit_tail(), add support to defer flush to a timer that expires shortly before vblank for async commits. In this way, multiple atomic commits (for example, cursor updates) can be coalesced into a single flush at the end of the frame. v2: don't hold lock over ->wait_flush(), to avoid locking interaction that was causing fps drop when combining page flips or non-async atomic commits and lots of legacy cursor updates Signed-off-by: Rob Clark <robdclark@chromium.org> Reviewed-by: Sean Paul <sean@poorly.run>
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c156
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c1
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h4
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h50
4 files changed, 210 insertions, 1 deletions
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 517d3a747778..55c8d72f8401 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -26,6 +26,95 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
26 return msm_framebuffer_prepare(new_state->fb, kms->aspace); 26 return msm_framebuffer_prepare(new_state->fb, kms->aspace);
27} 27}
28 28
29static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
30{
31 unsigned crtc_mask = BIT(crtc_idx);
32
33 mutex_lock(&kms->commit_lock);
34
35 if (!(kms->pending_crtc_mask & crtc_mask)) {
36 mutex_unlock(&kms->commit_lock);
37 return;
38 }
39
40 kms->pending_crtc_mask &= ~crtc_mask;
41
42 kms->funcs->enable_commit(kms);
43
44 /*
45 * Flush hardware updates:
46 */
47 DRM_DEBUG_ATOMIC("triggering async commit\n");
48 kms->funcs->flush_commit(kms, crtc_mask);
49 mutex_unlock(&kms->commit_lock);
50
51 /*
52 * Wait for flush to complete:
53 */
54 kms->funcs->wait_flush(kms, crtc_mask);
55
56 mutex_lock(&kms->commit_lock);
57 kms->funcs->complete_commit(kms, crtc_mask);
58 mutex_unlock(&kms->commit_lock);
59 kms->funcs->disable_commit(kms);
60}
61
62static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
63{
64 struct msm_pending_timer *timer = container_of(t,
65 struct msm_pending_timer, timer);
66 struct msm_drm_private *priv = timer->kms->dev->dev_private;
67
68 queue_work(priv->wq, &timer->work);
69
70 return HRTIMER_NORESTART;
71}
72
73static void msm_atomic_pending_work(struct work_struct *work)
74{
75 struct msm_pending_timer *timer = container_of(work,
76 struct msm_pending_timer, work);
77
78 msm_atomic_async_commit(timer->kms, timer->crtc_idx);
79}
80
81void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
82 struct msm_kms *kms, int crtc_idx)
83{
84 timer->kms = kms;
85 timer->crtc_idx = crtc_idx;
86 hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
87 timer->timer.function = msm_atomic_pending_timer;
88 INIT_WORK(&timer->work, msm_atomic_pending_work);
89}
90
91static bool can_do_async(struct drm_atomic_state *state,
92 struct drm_crtc **async_crtc)
93{
94 struct drm_connector_state *connector_state;
95 struct drm_connector *connector;
96 struct drm_crtc_state *crtc_state;
97 struct drm_crtc *crtc;
98 int i, num_crtcs = 0;
99
100 if (!(state->legacy_cursor_update || state->async_update))
101 return false;
102
103 /* any connector change, means slow path: */
104 for_each_new_connector_in_state(state, connector, connector_state, i)
105 return false;
106
107 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
108 if (drm_atomic_crtc_needs_modeset(crtc_state))
109 return false;
110 if (++num_crtcs > 1)
111 return false;
112 *async_crtc = crtc;
113 }
114
115 return true;
116}
117
29/* Get bitmask of crtcs that will need to be flushed. The bitmask 118/* Get bitmask of crtcs that will need to be flushed. The bitmask
30 * can be used with for_each_crtc_mask() iterator, to iterate 119 * can be used with for_each_crtc_mask() iterator, to iterate
31 * affected crtcs without needing to preserve the atomic state. 120 * affected crtcs without needing to preserve the atomic state.
@@ -47,9 +136,25 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
47 struct drm_device *dev = state->dev; 136 struct drm_device *dev = state->dev;
48 struct msm_drm_private *priv = dev->dev_private; 137 struct msm_drm_private *priv = dev->dev_private;
49 struct msm_kms *kms = priv->kms; 138 struct msm_kms *kms = priv->kms;
139 struct drm_crtc *async_crtc = NULL;
50 unsigned crtc_mask = get_crtc_mask(state); 140 unsigned crtc_mask = get_crtc_mask(state);
141 bool async = kms->funcs->vsync_time &&
142 can_do_async(state, &async_crtc);
51 143
52 kms->funcs->enable_commit(kms); 144 kms->funcs->enable_commit(kms);
145
146 /*
147 * Ensure any previous (potentially async) commit has
148 * completed:
149 */
150 kms->funcs->wait_flush(kms, crtc_mask);
151
152 mutex_lock(&kms->commit_lock);
153
154 /*
155 * Now that there is no in-progress flush, prepare the
156 * current update:
157 */
53 kms->funcs->prepare_commit(kms, state); 158 kms->funcs->prepare_commit(kms, state);
54 159
55 /* 160 /*
@@ -59,6 +164,49 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
59 drm_atomic_helper_commit_planes(dev, state, 0); 164 drm_atomic_helper_commit_planes(dev, state, 0);
60 drm_atomic_helper_commit_modeset_enables(dev, state); 165 drm_atomic_helper_commit_modeset_enables(dev, state);
61 166
167 if (async) {
168 struct msm_pending_timer *timer =
169 &kms->pending_timers[drm_crtc_index(async_crtc)];
170
171 /* async updates are limited to single-crtc updates: */
172 WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));
173
174 /*
175 * Start timer if we don't already have an update pending
176 * on this crtc:
177 */
178 if (!(kms->pending_crtc_mask & crtc_mask)) {
179 ktime_t vsync_time, wakeup_time;
180
181 kms->pending_crtc_mask |= crtc_mask;
182
183 vsync_time = kms->funcs->vsync_time(kms, async_crtc);
184 wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));
185
186 hrtimer_start(&timer->timer, wakeup_time,
187 HRTIMER_MODE_ABS);
188 }
189
190 kms->funcs->disable_commit(kms);
191 mutex_unlock(&kms->commit_lock);
192
193 /*
194 * At this point, from drm core's perspective, we
195 * are done with the atomic update, so we can just
196 * go ahead and signal that it is done:
197 */
198 drm_atomic_helper_commit_hw_done(state);
199 drm_atomic_helper_cleanup_planes(dev, state);
200
201 return;
202 }
203
204 /*
205 * If there is any async flush pending on updated crtcs, fold
206 * them into the current flush.
207 */
208 kms->pending_crtc_mask &= ~crtc_mask;
209
62 /* 210 /*
63 * Flush hardware updates: 211 * Flush hardware updates:
64 */ 212 */
@@ -67,12 +215,18 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
67 kms->funcs->commit(kms, state); 215 kms->funcs->commit(kms, state);
68 } 216 }
69 kms->funcs->flush_commit(kms, crtc_mask); 217 kms->funcs->flush_commit(kms, crtc_mask);
218 mutex_unlock(&kms->commit_lock);
70 219
220 /*
221 * Wait for flush to complete:
222 */
71 kms->funcs->wait_flush(kms, crtc_mask); 223 kms->funcs->wait_flush(kms, crtc_mask);
224
225 mutex_lock(&kms->commit_lock);
72 kms->funcs->complete_commit(kms, crtc_mask); 226 kms->funcs->complete_commit(kms, crtc_mask);
227 mutex_unlock(&kms->commit_lock);
73 kms->funcs->disable_commit(kms); 228 kms->funcs->disable_commit(kms);
74 229
75 drm_atomic_helper_commit_hw_done(state); 230 drm_atomic_helper_commit_hw_done(state);
76
77 drm_atomic_helper_cleanup_planes(dev, state); 231 drm_atomic_helper_cleanup_planes(dev, state);
78} 232}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 996a508f762c..c84f0a8b3f2c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -473,6 +473,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
473 ddev->mode_config.normalize_zpos = true; 473 ddev->mode_config.normalize_zpos = true;
474 474
475 if (kms) { 475 if (kms) {
476 kms->dev = ddev;
476 ret = kms->funcs->hw_init(kms); 477 ret = kms->funcs->hw_init(kms);
477 if (ret) { 478 if (ret) {
478 DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret); 479 DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 1fa0974eb69f..71547e756e29 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -221,8 +221,12 @@ struct msm_format {
221 uint32_t pixel_format; 221 uint32_t pixel_format;
222}; 222};
223 223
224struct msm_pending_timer;
225
224int msm_atomic_prepare_fb(struct drm_plane *plane, 226int msm_atomic_prepare_fb(struct drm_plane *plane,
225 struct drm_plane_state *new_state); 227 struct drm_plane_state *new_state);
228void msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
229 struct msm_kms *kms, int crtc_idx);
226void msm_atomic_commit_tail(struct drm_atomic_state *state); 230void msm_atomic_commit_tail(struct drm_atomic_state *state);
227struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev); 231struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
228void msm_atomic_state_clear(struct drm_atomic_state *state); 232void msm_atomic_state_clear(struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 85264a2f1f6d..ed8df60a32ad 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -33,6 +33,20 @@ struct msm_kms_funcs {
33 33
34 /* 34 /*
35 * Atomic commit handling: 35 * Atomic commit handling:
36 *
37 * Note that in the case of async commits, the funcs which take
38 * a crtc_mask (ie. ->flush_commit(), and ->complete_commit())
39 * might not be evenly balanced with ->prepare_commit(), however
40 * each crtc that is affected by a ->prepare_commit() (potentially
41 * multiple times) will eventually (at end of vsync period) be
42 * flushed and completed.
43 *
44 * This has some implications about tracking of cleanup state,
45 * for example SMP blocks to release after commit completes. Ie.
46 * cleanup state should be also duplicated in the various
47 * duplicate_state() methods, as the current cleanup state at
48 * ->complete_commit() time may have accumulated cleanup work
49 * from multiple commits.
36 */ 50 */
37 51
38 /** 52 /**
@@ -46,6 +60,14 @@ struct msm_kms_funcs {
46 void (*disable_commit)(struct msm_kms *kms); 60 void (*disable_commit)(struct msm_kms *kms);
47 61
48 /** 62 /**
63 * If the kms backend supports async commit, it should implement
64 * this method to return the time of the next vsync. This is
65 * used to determine a time slightly before vsync, for the async
66 * commit timer to run and complete an async commit.
67 */
68 ktime_t (*vsync_time)(struct msm_kms *kms, struct drm_crtc *crtc);
69
70 /**
49 * Prepare for atomic commit. This is called after any previous 71 * Prepare for atomic commit. This is called after any previous
50 * (async or otherwise) commit has completed. 72 * (async or otherwise) commit has completed.
51 */ 73 */
@@ -109,20 +131,48 @@ struct msm_kms_funcs {
109#endif 131#endif
110}; 132};
111 133
134struct msm_kms;
135
136/*
137 * A per-crtc timer for pending async atomic flushes. Scheduled to expire
138 * shortly before vblank to flush pending async updates.
139 */
140struct msm_pending_timer {
141 struct hrtimer timer;
142 struct work_struct work;
143 struct msm_kms *kms;
144 unsigned crtc_idx;
145};
146
112struct msm_kms { 147struct msm_kms {
113 const struct msm_kms_funcs *funcs; 148 const struct msm_kms_funcs *funcs;
149 struct drm_device *dev;
114 150
115 /* irq number to be passed on to drm_irq_install */ 151 /* irq number to be passed on to drm_irq_install */
116 int irq; 152 int irq;
117 153
118 /* mapper-id used to request GEM buffer mapped for scanout: */ 154 /* mapper-id used to request GEM buffer mapped for scanout: */
119 struct msm_gem_address_space *aspace; 155 struct msm_gem_address_space *aspace;
156
157 /*
158 * For async commit, where ->flush_commit() and later happens
159 * from the crtc's pending_timer close to end of the frame:
160 */
161 struct mutex commit_lock;
162 unsigned pending_crtc_mask;
163 struct msm_pending_timer pending_timers[MAX_CRTCS];
120}; 164};
121 165
122static inline void msm_kms_init(struct msm_kms *kms, 166static inline void msm_kms_init(struct msm_kms *kms,
123 const struct msm_kms_funcs *funcs) 167 const struct msm_kms_funcs *funcs)
124{ 168{
169 unsigned i;
170
171 mutex_init(&kms->commit_lock);
125 kms->funcs = funcs; 172 kms->funcs = funcs;
173
174 for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
175 msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
126} 176}
127 177
128struct msm_kms *mdp4_kms_init(struct drm_device *dev); 178struct msm_kms *mdp4_kms_init(struct drm_device *dev);