Commit metadata
Author:    Dave Airlie <airlied@redhat.com>  2018-11-28 19:21:23 -0500
Committer: Dave Airlie <airlied@redhat.com>  2018-11-28 19:28:49 -0500
Commit:    61647c77cb15354a329cbb36fe7a2253b36b51b1 (patch)
Tree:      59d887f99bc4a2bdddc7cfc1d81794c2a4cdc759 /drivers/gpu/drm/v3d
Parent:    1a31c26ed7b495f152e3103dc7c68e3307a39541 (diff)
Parent:    08f73d668048ffa3ba6b1426b6ba0a89b16aefd7 (diff)
Merge tag 'drm-misc-next-2018-11-28' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for v4.21:
Core Changes:
- Merge drm_info.c into drm_debugfs.c
- Complete the fake drm_crtc_commit's hw_done/flip_done sooner.
- Remove deprecated drm_obj_ref/unref functions. All drivers use get/put now.
- Decrease stack use of drm_gem_prime_mmap.
- Improve documentation for dumb callbacks.
Driver Changes:
- Add edid support to virtio.
- Wait on implicit fence in meson and sun4i.
- Add support for BGRX8888 to sun4i.
- Preparation patches for sun4i driver to start supporting linear and tiled YUV formats.
- Add support for HDMI 1.4 4k modes to meson, and support for VIC alternate timings.
- Drop custom dumb_map in vkms.
- Small fixes and cleanups to v3d.
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/151a3270-b1be-ed75-bd58-6b29d741f592@linux.intel.com
Diffstat (limited to 'drivers/gpu/drm/v3d')
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c   | 33
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c |  2
2 files changed, 12 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b88c96911453..1e8947c7d954 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -210,14 +210,11 @@ static void | |||
210 | v3d_attach_object_fences(struct v3d_exec_info *exec) | 210 | v3d_attach_object_fences(struct v3d_exec_info *exec) |
211 | { | 211 | { |
212 | struct dma_fence *out_fence = exec->render_done_fence; | 212 | struct dma_fence *out_fence = exec->render_done_fence; |
213 | struct v3d_bo *bo; | ||
214 | int i; | 213 | int i; |
215 | 214 | ||
216 | for (i = 0; i < exec->bo_count; i++) { | 215 | for (i = 0; i < exec->bo_count; i++) { |
217 | bo = to_v3d_bo(&exec->bo[i]->base); | ||
218 | |||
219 | /* XXX: Use shared fences for read-only objects. */ | 216 | /* XXX: Use shared fences for read-only objects. */ |
220 | reservation_object_add_excl_fence(bo->resv, out_fence); | 217 | reservation_object_add_excl_fence(exec->bo[i]->resv, out_fence); |
221 | } | 218 | } |
222 | } | 219 | } |
223 | 220 | ||
@@ -228,11 +225,8 @@ v3d_unlock_bo_reservations(struct drm_device *dev, | |||
228 | { | 225 | { |
229 | int i; | 226 | int i; |
230 | 227 | ||
231 | for (i = 0; i < exec->bo_count; i++) { | 228 | for (i = 0; i < exec->bo_count; i++) |
232 | struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base); | 229 | ww_mutex_unlock(&exec->bo[i]->resv->lock); |
233 | |||
234 | ww_mutex_unlock(&bo->resv->lock); | ||
235 | } | ||
236 | 230 | ||
237 | ww_acquire_fini(acquire_ctx); | 231 | ww_acquire_fini(acquire_ctx); |
238 | } | 232 | } |
@@ -251,13 +245,13 @@ v3d_lock_bo_reservations(struct drm_device *dev, | |||
251 | { | 245 | { |
252 | int contended_lock = -1; | 246 | int contended_lock = -1; |
253 | int i, ret; | 247 | int i, ret; |
254 | struct v3d_bo *bo; | ||
255 | 248 | ||
256 | ww_acquire_init(acquire_ctx, &reservation_ww_class); | 249 | ww_acquire_init(acquire_ctx, &reservation_ww_class); |
257 | 250 | ||
258 | retry: | 251 | retry: |
259 | if (contended_lock != -1) { | 252 | if (contended_lock != -1) { |
260 | bo = to_v3d_bo(&exec->bo[contended_lock]->base); | 253 | struct v3d_bo *bo = exec->bo[contended_lock]; |
254 | |||
261 | ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, | 255 | ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, |
262 | acquire_ctx); | 256 | acquire_ctx); |
263 | if (ret) { | 257 | if (ret) { |
@@ -270,19 +264,16 @@ retry: | |||
270 | if (i == contended_lock) | 264 | if (i == contended_lock) |
271 | continue; | 265 | continue; |
272 | 266 | ||
273 | bo = to_v3d_bo(&exec->bo[i]->base); | 267 | ret = ww_mutex_lock_interruptible(&exec->bo[i]->resv->lock, |
274 | 268 | acquire_ctx); | |
275 | ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx); | ||
276 | if (ret) { | 269 | if (ret) { |
277 | int j; | 270 | int j; |
278 | 271 | ||
279 | for (j = 0; j < i; j++) { | 272 | for (j = 0; j < i; j++) |
280 | bo = to_v3d_bo(&exec->bo[j]->base); | 273 | ww_mutex_unlock(&exec->bo[j]->resv->lock); |
281 | ww_mutex_unlock(&bo->resv->lock); | ||
282 | } | ||
283 | 274 | ||
284 | if (contended_lock != -1 && contended_lock >= i) { | 275 | if (contended_lock != -1 && contended_lock >= i) { |
285 | bo = to_v3d_bo(&exec->bo[contended_lock]->base); | 276 | struct v3d_bo *bo = exec->bo[contended_lock]; |
286 | 277 | ||
287 | ww_mutex_unlock(&bo->resv->lock); | 278 | ww_mutex_unlock(&bo->resv->lock); |
288 | } | 279 | } |
@@ -303,9 +294,7 @@ retry: | |||
303 | * before we commit the CL to the hardware. | 294 | * before we commit the CL to the hardware. |
304 | */ | 295 | */ |
305 | for (i = 0; i < exec->bo_count; i++) { | 296 | for (i = 0; i < exec->bo_count; i++) { |
306 | bo = to_v3d_bo(&exec->bo[i]->base); | 297 | ret = reservation_object_reserve_shared(exec->bo[i]->resv, 1); |
307 | |||
308 | ret = reservation_object_reserve_shared(bo->resv, 1); | ||
309 | if (ret) { | 298 | if (ret) { |
310 | v3d_unlock_bo_reservations(dev, exec, acquire_ctx); | 299 | v3d_unlock_bo_reservations(dev, exec, acquire_ctx); |
311 | return ret; | 300 | return ret; |
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 445b2ef03303..c66d0ce21435 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -41,7 +41,7 @@ v3d_job_free(struct drm_sched_job *sched_job) | |||
41 | } | 41 | } |
42 | 42 | ||
43 | /** | 43 | /** |
44 | * Returns the fences that the bin job depends on, one by one. | 44 | * Returns the fences that the bin or render job depends on, one by one. |
45 | * v3d_job_run() won't be called until all of them have been signaled. | 45 | * v3d_job_run() won't be called until all of them have been signaled. |
46 | */ | 46 | */ |
47 | static struct dma_fence * | 47 | static struct dma_fence * |