-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 139
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c    |   7
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h    |   3
3 files changed, 8 insertions(+), 141 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 95c7868445dd..f0635c3da7f4 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -18,8 +18,6 @@
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_kms.h"
-#include "msm_gem.h"
-#include "msm_fence.h"
 
 static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
 		struct drm_atomic_state *old_state)
@@ -59,7 +57,7 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
 	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
 }
 
-static void msm_atomic_commit_tail(struct drm_atomic_state *state)
+void msm_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -73,19 +71,6 @@ static void msm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
-	/* NOTE: _wait_for_vblanks() only waits for vblank on
-	 * enabled CRTCs.  So we end up faulting when disabling
-	 * due to (potentially) unref'ing the outgoing fb's
-	 * before the vblank when the disable has latched.
-	 *
-	 * But if it did wait on disabled (or newly disabled)
-	 * CRTCs, that would be racy (ie. we could have missed
-	 * the irq.  We need some way to poll for pipe shut
-	 * down.  Or just live with occasionally hitting the
-	 * timeout in the CRTC disable path (which really should
-	 * not be critical path)
-	 */
-
 	msm_atomic_wait_for_commit_done(dev, state);
 
 	kms->funcs->complete_commit(kms, state);
@@ -96,125 +81,3 @@ static void msm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 }
-
-/* The (potentially) asynchronous part of the commit.  At this point
- * nothing can fail short of armageddon.
- */
-static void commit_tail(struct drm_atomic_state *state)
-{
-	drm_atomic_helper_wait_for_fences(state->dev, state, false);
-
-	drm_atomic_helper_wait_for_dependencies(state);
-
-	msm_atomic_commit_tail(state);
-
-	drm_atomic_helper_commit_cleanup_done(state);
-
-	drm_atomic_state_put(state);
-}
-
-static void commit_work(struct work_struct *work)
-{
-	struct drm_atomic_state *state = container_of(work,
-						      struct drm_atomic_state,
-						      commit_work);
-	commit_tail(state);
-}
-
-/**
- * drm_atomic_helper_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int msm_atomic_commit(struct drm_device *dev,
-		struct drm_atomic_state *state, bool nonblock)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	struct drm_plane *plane;
-	struct drm_plane_state *old_plane_state, *new_plane_state;
-	int i, ret;
-
-	/*
-	 * Note that plane->atomic_async_check() should fail if we need
-	 * to re-assign hwpipe or anything that touches global atomic
-	 * state, so we'll never go down the async update path in those
-	 * cases.
-	 */
-	if (state->async_update) {
-		ret = drm_atomic_helper_prepare_planes(dev, state);
-		if (ret)
-			return ret;
-
-		drm_atomic_helper_async_commit(dev, state);
-		drm_atomic_helper_cleanup_planes(dev, state);
-		return 0;
-	}
-
-	ret = drm_atomic_helper_setup_commit(state, nonblock);
-	if (ret)
-		return ret;
-
-	INIT_WORK(&state->commit_work, commit_work);
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
-	if (!nonblock) {
-		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
-		if (ret)
-			goto error;
-	}
-
-	/*
-	 * This is the point of no return - everything below never fails except
-	 * when the hw goes bonghits. Which means we can commit the new state on
-	 * the software side now.
-	 *
-	 * swap driver private state while still holding state_lock
-	 */
-	BUG_ON(drm_atomic_helper_swap_state(state, true) < 0);
-
-	/*
-	 * This is the point of no return - everything below never fails except
-	 * when the hw goes bonghits. Which means we can commit the new state on
-	 * the software side now.
-	 */
-
-	/*
-	 * Everything below can be run asynchronously without the need to grab
-	 * any modeset locks at all under one conditions: It must be guaranteed
-	 * that the asynchronous work has either been cancelled (if the driver
-	 * supports it, which at least requires that the framebuffers get
-	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
-	 * before the new state gets committed on the software side with
-	 * drm_atomic_helper_swap_state().
-	 *
-	 * This scheme allows new atomic state updates to be prepared and
-	 * checked in parallel to the asynchronous completion of the previous
-	 * update. Which is important since compositors need to figure out the
-	 * composition of the next frame right after having submitted the
-	 * current layout.
-	 */
-
-	drm_atomic_state_get(state);
-	if (nonblock)
-		queue_work(system_unbound_wq, &state->commit_work);
-	else
-		commit_tail(state);
-
-	return 0;
-
-error:
-	drm_atomic_helper_cleanup_planes(dev, state);
-	return ret;
-}
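
With msm_atomic_commit() gone, the core atomic helper owns the commit machinery and calls back into the driver only for the tail. For orientation, here is a simplified sketch of the dispatch that drm_atomic_helper_commit() performs (paraphrased from drivers/gpu/drm/drm_atomic_helper.c of this era, trimmed for brevity; not the verbatim kernel code):

/*
 * Simplified sketch (paraphrase, not verbatim): the helper's internal
 * commit_tail() runs either directly (blocking) or from a work item
 * (nonblocking), and lands in the driver's atomic_commit_tail hook --
 * msm_atomic_commit_tail() after this patch.
 */
static void commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	const struct drm_mode_config_helper_funcs *funcs =
		dev->mode_config.helper_private;

	drm_atomic_helper_wait_for_fences(dev, old_state, false);
	drm_atomic_helper_wait_for_dependencies(old_state);

	if (funcs && funcs->atomic_commit_tail)
		funcs->atomic_commit_tail(old_state);	/* driver hook */
	else
		drm_atomic_helper_commit_tail(old_state);	/* default */

	drm_atomic_helper_commit_cleanup_done(old_state);
	drm_atomic_state_put(old_state);
}

This is exactly the sequence the deleted msm commit_tail()/commit_work() pair duplicated, which is why they can be dropped without behavioral change.
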
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9cec74c79aa2..021a0b6f9a59 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -41,7 +41,11 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = msm_framebuffer_create,
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = msm_atomic_commit,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
+	.atomic_commit_tail = msm_atomic_commit_tail,
 };
 
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
@@ -438,6 +442,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	}
 
 	ddev->mode_config.funcs = &mode_config_funcs;
+	ddev->mode_config.helper_private = &mode_config_helper_funcs;
 
 	ret = drm_vblank_init(ddev, priv->num_crtcs);
 	if (ret < 0) {
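
The blocking/nonblocking split that msm open-coded is likewise subsumed by the helper. Roughly (again a paraphrase of drm_atomic_helper_commit(), with the async-update fast path and error unwinding elided; commit_work()/commit_tail() are the helper internals sketched above):

int drm_atomic_helper_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool nonblock)
{
	int ret;

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_swap_state(state, true);
	if (ret)
		return ret;	/* real helper unwinds prepared planes here */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		commit_tail(state);

	return 0;
}

The driver keeps only what is actually msm-specific (kms->funcs->prepare_commit()/complete_commit() and the vblank wait) in msm_atomic_commit_tail().
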
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 2b2688896295..b2da1fbf81e0 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -158,8 +158,7 @@ struct msm_format {
 
 int msm_atomic_prepare_fb(struct drm_plane *plane,
 		struct drm_plane_state *new_state);
-int msm_atomic_commit(struct drm_device *dev,
-		struct drm_atomic_state *state, bool nonblock);
+void msm_atomic_commit_tail(struct drm_atomic_state *state);
 struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);