author      Rob Clark <robdclark@gmail.com>    2016-03-16 18:18:17 -0400
committer   Rob Clark <robdclark@gmail.com>    2016-05-08 10:22:14 -0400
commit      ba00c3f2f0c84456ffe9d548823ff4fb8e4e7ed4 (patch)
tree        1cddb413a2c1f4281f938f356bf74cfaf33cedcc
parent      ca762a8ae7f453978a4769af9dcd3cb08e45b932 (diff)
drm/msm: remove fence_cbs
These days this was only used for atomic commit. So instead just give
atomic commit its own workqueue, where we can block on each bo in turn.
Simplifies things a whole bunch and makes the 'struct fence' conversion
easier.
Signed-off-by: Rob Clark <robdclark@gmail.com>
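
[Editor's note] In outline, the nonblocking commit now queues a work item on a
dedicated ordered workqueue instead of registering a fence callback, and the
worker blocks on each bo before applying the state. Below is a condensed sketch
of that flow, assembled from the hunks that follow; the crtc busy-tracking and
kms helper calls are elided as comments, so this is a summary, not the patch:

/* Condensed sketch of the post-patch commit flow (simplified from
 * msm_atomic.c / msm_drv.c below; not the complete patch). */

struct msm_commit {
        struct drm_device *dev;
        struct drm_atomic_state *state;
        struct work_struct work;        /* replaces fence + msm_fence_cb */
        uint32_t crtc_mask;
};

static void commit_worker(struct work_struct *work)
{
        /* runs on priv->atomic_wq; complete_commit() first blocks on
         * each bo via wait_fences(), then applies the state */
        complete_commit(container_of(work, struct msm_commit, work), true);
}

int msm_atomic_commit(struct drm_device *dev,
                struct drm_atomic_state *state, bool nonblock)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_commit *c = commit_init(state);      /* does INIT_WORK() */

        /* ... crtc busy-tracking and plane-state swap elided ... */

        if (nonblock) {
                queue_work(priv->atomic_wq, &c->work);  /* don't block caller */
                return 0;
        }

        complete_commit(c, false);      /* blocking path waits inline */
        return 0;
}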
-rw-r--r--   drivers/gpu/drm/msm/msm_atomic.c   77
-rw-r--r--   drivers/gpu/drm/msm/msm_drv.c       4
-rw-r--r--   drivers/gpu/drm/msm/msm_drv.h       5
-rw-r--r--   drivers/gpu/drm/msm/msm_fence.c    43
-rw-r--r--   drivers/gpu/drm/msm/msm_fence.h    16
-rw-r--r--   drivers/gpu/drm/msm/msm_gem.c       9
6 files changed, 47 insertions(+), 107 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index a2a3d9f25b7b..6ac7192d0ad6 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -18,18 +18,16 @@
 #include "msm_drv.h"
 #include "msm_kms.h"
 #include "msm_gem.h"
-#include "msm_gpu.h" /* temporary */
 #include "msm_fence.h"
 
 struct msm_commit {
         struct drm_device *dev;
         struct drm_atomic_state *state;
-        uint32_t fence;
-        struct msm_fence_cb fence_cb;
+        struct work_struct work;
         uint32_t crtc_mask;
 };
 
-static void fence_cb(struct msm_fence_cb *cb);
+static void commit_worker(struct work_struct *work);
 
 /* block until specified crtcs are no longer pending update, and
  * atomically mark them as pending update
@@ -71,11 +69,7 @@ static struct msm_commit *commit_init(struct drm_atomic_state *state)
         c->dev = state->dev;
         c->state = state;
 
-        /* TODO we might need a way to indicate to run the cb on a
-         * different wq so wait_for_vblanks() doesn't block retiring
-         * bo's..
-         */
-        INIT_FENCE_CB(&c->fence_cb, fence_cb);
+        INIT_WORK(&c->work, commit_worker);
 
         return c;
 }
@@ -113,16 +107,39 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
         }
 }
 
+static void wait_fences(struct msm_commit *c, bool async)
+{
+        int nplanes = c->dev->mode_config.num_total_plane;
+        ktime_t timeout = ktime_add_ms(ktime_get(), 1000);
+        int i;
+
+        for (i = 0; i < nplanes; i++) {
+                struct drm_plane *plane = c->state->planes[i];
+                struct drm_plane_state *new_state = c->state->plane_states[i];
+
+                if (!plane)
+                        continue;
+
+                if ((plane->state->fb != new_state->fb) && new_state->fb) {
+                        struct drm_gem_object *obj =
+                                msm_framebuffer_bo(new_state->fb, 0);
+                        msm_gem_cpu_sync(obj, MSM_PREP_READ, &timeout);
+                }
+        }
+}
+
 /* The (potentially) asynchronous part of the commit.  At this point
  * nothing can fail short of armageddon.
  */
-static void complete_commit(struct msm_commit *c)
+static void complete_commit(struct msm_commit *c, bool async)
 {
         struct drm_atomic_state *state = c->state;
         struct drm_device *dev = state->dev;
         struct msm_drm_private *priv = dev->dev_private;
         struct msm_kms *kms = priv->kms;
 
+        wait_fences(c, async);
+
         kms->funcs->prepare_commit(kms, state);
 
         drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -155,17 +172,9 @@ static void complete_commit(struct msm_commit *c)
         commit_destroy(c);
 }
 
-static void fence_cb(struct msm_fence_cb *cb)
+static void commit_worker(struct work_struct *work)
 {
-        struct msm_commit *c =
-                        container_of(cb, struct msm_commit, fence_cb);
-        complete_commit(c);
-}
-
-static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
-{
-        struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
-        c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+        complete_commit(container_of(work, struct msm_commit, work), true);
 }
 
 int msm_atomic_check(struct drm_device *dev,
@@ -204,9 +213,7 @@ int msm_atomic_commit(struct drm_device *dev,
                 struct drm_atomic_state *state, bool nonblock)
 {
         struct msm_drm_private *priv = dev->dev_private;
-        int nplanes = dev->mode_config.num_total_plane;
         int ncrtcs = dev->mode_config.num_crtc;
-        ktime_t timeout;
         struct msm_commit *c;
         int i, ret;
 
@@ -231,20 +238,6 @@
         }
 
         /*
-         * Figure out what fence to wait for:
-         */
-        for (i = 0; i < nplanes; i++) {
-                struct drm_plane *plane = state->planes[i];
-                struct drm_plane_state *new_state = state->plane_states[i];
-
-                if (!plane)
-                        continue;
-
-                if ((plane->state->fb != new_state->fb) && new_state->fb)
-                        add_fb(c, new_state->fb);
-        }
-
-        /*
          * Wait for pending updates on any of the same crtc's and then
          * mark our set of crtc's as busy:
          */
@@ -278,18 +271,12 @@
          * current layout.
          */
 
-        if (nonblock && priv->gpu) {
-                msm_queue_fence_cb(priv->gpu->fctx, &c->fence_cb, c->fence);
+        if (nonblock) {
+                queue_work(priv->atomic_wq, &c->work);
                 return 0;
         }
 
-        timeout = ktime_add_ms(ktime_get(), 1000);
-
-        /* uninterruptible wait */
-        if (priv->gpu)
-                msm_wait_fence(priv->gpu->fctx, c->fence, &timeout, false);
-
-        complete_commit(c);
+        complete_commit(c, false);
 
         return 0;
 
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 2b859f38772a..745793991ba6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -213,6 +213,9 @@ static int msm_unload(struct drm_device *dev)
         flush_workqueue(priv->wq);
         destroy_workqueue(priv->wq);
 
+        flush_workqueue(priv->atomic_wq);
+        destroy_workqueue(priv->atomic_wq);
+
         if (kms) {
                 pm_runtime_disable(dev->dev);
                 kms->funcs->destroy(kms);
@@ -339,6 +342,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
         dev->dev_private = priv;
 
         priv->wq = alloc_ordered_workqueue("msm", 0);
+        priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
         init_waitqueue_head(&priv->pending_crtcs_event);
 
         INIT_LIST_HEAD(&priv->inactive_list);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 6c3f67bc1bb7..7124c7f262ec 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -109,6 +109,7 @@ struct msm_drm_private {
         struct list_head inactive_list;
 
         struct workqueue_struct *wq;
+        struct workqueue_struct *atomic_wq;
 
         /* crtcs pending async atomic updates: */
         uint32_t pending_crtcs;
@@ -192,8 +193,8 @@ void *msm_gem_vaddr(struct drm_gem_object *obj);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
                 struct msm_gpu *gpu, bool write, uint32_t fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
-int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
-                ktime_t *timeout);
+int msm_gem_cpu_sync(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);
 void msm_gem_free_object(struct drm_gem_object *obj);
 int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index f0ed6a680653..088610ca80ca 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -33,7 +33,6 @@ msm_fence_context_alloc(struct drm_device *dev, const char *name)
         fctx->dev = dev;
         fctx->name = name;
         init_waitqueue_head(&fctx->event);
-        INIT_LIST_HEAD(&fctx->fence_cbs);
 
         return fctx;
 }
@@ -86,54 +85,12 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
         return ret;
 }
 
-int msm_queue_fence_cb(struct msm_fence_context *fctx,
-                struct msm_fence_cb *cb, uint32_t fence)
-{
-        struct msm_drm_private *priv = fctx->dev->dev_private;
-        int ret = 0;
-
-        mutex_lock(&fctx->dev->struct_mutex);
-        if (!list_empty(&cb->work.entry)) {
-                ret = -EINVAL;
-        } else if (fence > fctx->completed_fence) {
-                cb->fence = fence;
-                list_add_tail(&cb->work.entry, &fctx->fence_cbs);
-        } else {
-                queue_work(priv->wq, &cb->work);
-        }
-        mutex_unlock(&fctx->dev->struct_mutex);
-
-        return ret;
-}
-
 /* called from workqueue */
 void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 {
-        struct msm_drm_private *priv = fctx->dev->dev_private;
-
         mutex_lock(&fctx->dev->struct_mutex);
         fctx->completed_fence = max(fence, fctx->completed_fence);
-
-        while (!list_empty(&fctx->fence_cbs)) {
-                struct msm_fence_cb *cb;
-
-                cb = list_first_entry(&fctx->fence_cbs,
-                                struct msm_fence_cb, work.entry);
-
-                if (cb->fence > fctx->completed_fence)
-                        break;
-
-                list_del_init(&cb->work.entry);
-                queue_work(priv->wq, &cb->work);
-        }
-
         mutex_unlock(&fctx->dev->struct_mutex);
 
         wake_up_all(&fctx->event);
 }
-
-void __msm_fence_worker(struct work_struct *work)
-{
-        struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
-        cb->func(cb);
-}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 3ed20981fc05..2820781d974a 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -27,28 +27,12 @@ struct msm_fence_context {
         uint32_t last_fence;          /* last assigned fence */
         uint32_t completed_fence;     /* last completed fence */
         wait_queue_head_t event;
-        /* callbacks deferred until bo is inactive: */
-        struct list_head fence_cbs;
 };
 
 struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
                 const char *name);
 void msm_fence_context_free(struct msm_fence_context *fctx);
 
-/* callback from wq once fence has passed: */
-struct msm_fence_cb {
-        struct work_struct work;
-        uint32_t fence;
-        void (*func)(struct msm_fence_cb *cb);
-};
-
-void __msm_fence_worker(struct work_struct *work);
-
-#define INIT_FENCE_CB(_cb, _func) do { \
-                INIT_WORK(&(_cb)->work, __msm_fence_worker); \
-                (_cb)->func = _func; \
-        } while (0)
-
 int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
                 ktime_t *timeout, bool interruptible);
 int msm_queue_fence_cb(struct msm_fence_context *fctx,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 80aba76f7a4d..9080ed13998a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -439,7 +439,7 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
         list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 }
 
-int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
+int msm_gem_cpu_sync(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 {
         struct drm_device *dev = obj->dev;
         struct msm_drm_private *priv = dev->dev_private;
@@ -456,6 +456,13 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
                 ret = msm_wait_fence(priv->gpu->fctx, fence, timeout, true);
         }
 
+        return ret;
+}
+
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
+{
+        int ret = msm_gem_cpu_sync(obj, op, timeout);
+
         /* TODO cache maintenance */
 
         return ret;