author     Rob Clark <robdclark@gmail.com>   2013-09-14 14:01:55 -0400
committer  Rob Clark <robdclark@gmail.com>   2013-11-01 12:39:45 -0400
commit     edd4fc63a33eeeb922503b14e8040a3b028c76a5
tree       6916bd23f5af0045c7fad9e0fb73eff5990e02aa /drivers/gpu/drm/msm/msm_gem.c
parent     a862391871004bf8dea2299bb712aa93a512334a
drm/msm: rework inactive-work
Re-arrange things a bit so that work requested after a bo fence
passes (like pageflip) gets done before retiring bos. Without any
sort of bo cache in userspace, some games can trigger hundreds of
transient bos, which can cause retire to take a long time (5-10ms).
Obviously we want a bo cache.. but this cleanup will make things a
bit easier for atomic as well, and makes things a bit cleaner.
Signed-off-by: Rob Clark <robdclark@gmail.com>
Acked-by: David Brown <davidb@codeaurora.org>
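For context on the interface change below: the old msm_gem_queue_inactive_work() took a bare work_struct and parked it on the bo's private inactive_work list, so it could only run after the whole retire pass; the new msm_gem_queue_inactive_cb() takes a struct msm_fence_cb, records the bo's current fence in it, and parks it on the device-wide priv->fence_cbs list so it can be dispatched as soon as that fence passes. A minimal sketch of how a caller such as the pageflip path might use the new API follows; the msm_fence_cb declaration and INIT_FENCE_CB() helper are assumed to come from msm_drv.h changes elsewhere in this series (not part of this diffstat), and all "hypothetical_" names are illustrative only.

```c
/*
 * Hypothetical user of the new callback API.  struct msm_fence_cb and
 * INIT_FENCE_CB() are assumed to be added to msm_drv.h by this series,
 * roughly as:
 *
 *     struct msm_fence_cb {
 *             struct work_struct work;
 *             uint32_t fence;
 *             void (*func)(struct msm_fence_cb *cb);
 *     };
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>

#include "msm_drv.h"
#include "msm_gem.h"

struct hypothetical_flip_state {
	struct msm_fence_cb cb;        /* embedded fence callback */
	struct drm_gem_object *bo;     /* scanout bo we are waiting on */
};

static void hypothetical_flip_complete(struct msm_fence_cb *cb)
{
	struct hypothetical_flip_state *st =
		container_of(cb, struct hypothetical_flip_state, cb);

	/*
	 * Runs from priv->wq once the bo's fence has passed, i.e. before
	 * the (potentially slow) retire loop over transient bos gets to
	 * run.  The real flip-completion work would go here.
	 */
	(void)st;
}

static int hypothetical_queue_flip(struct hypothetical_flip_state *st)
{
	INIT_FENCE_CB(&st->cb, hypothetical_flip_complete);

	/*
	 * If the bo is still busy, msm_gem_queue_inactive_cb() records the
	 * bo's current read/write fence in cb->fence and parks the cb on
	 * priv->fence_cbs; if the bo is already idle, the work is queued
	 * immediately.
	 */
	return msm_gem_queue_inactive_cb(st->bo, &st->cb);
}
```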
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  35
1 file changed, 19 insertions, 16 deletions
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index ea2c96f9459b..291939de299e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -309,7 +309,17 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 
 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
 {
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
+
+	/* this is safe right now because we don't unmap until the
+	 * bo is deleted:
+	 */
+	if (msm_obj->domain[id].iova) {
+		*iova = msm_obj->domain[id].iova;
+		return 0;
+	}
+
 	mutex_lock(&obj->dev->struct_mutex);
 	ret = msm_gem_get_iova_locked(obj, id, iova);
 	mutex_unlock(&obj->dev->struct_mutex);
@@ -379,8 +389,11 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
 	return ret;
 }
 
-int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
-		struct work_struct *work)
+/* setup callback for when bo is no longer busy..
+ * TODO probably want to differentiate read vs write..
+ */
+int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
+		struct msm_fence_cb *cb)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -388,12 +401,13 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
-	if (!list_empty(&work->entry)) {
+	if (!list_empty(&cb->work.entry)) {
 		ret = -EINVAL;
 	} else if (is_active(msm_obj)) {
-		list_add_tail(&work->entry, &msm_obj->inactive_work);
+		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
+		list_add_tail(&cb->work.entry, &priv->fence_cbs);
 	} else {
-		queue_work(priv->wq, work);
+		queue_work(priv->wq, &cb->work);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -426,16 +440,6 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 	msm_obj->write_fence = 0;
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-
-	while (!list_empty(&msm_obj->inactive_work)) {
-		struct work_struct *work;
-
-		work = list_first_entry(&msm_obj->inactive_work,
-				struct work_struct, entry);
-
-		list_del_init(&work->entry);
-		queue_work(priv->wq, work);
-	}
 }
 
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
@@ -604,7 +608,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	reservation_object_init(msm_obj->resv);
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
-	INIT_LIST_HEAD(&msm_obj->inactive_work);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
 	*obj = &msm_obj->base;
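The dispatch side — walking priv->fence_cbs when a fence completes — is not part of this diffstat (it would live in the driver's fence-update path). Based only on how cb->fence and priv->fence_cbs are used above, it plausibly looks something like the sketch below; the function name, the completed_fence field, and the fence-ordering assumption are all assumptions, not taken from this diff.

```c
/*
 * Hedged sketch of the dispatch half, assuming callbacks sit on
 * priv->fence_cbs in fence order and that priv->completed_fence tracks
 * the highest fence the GPU has retired (neither detail appears in
 * this diffstat).
 */
static void hypothetical_update_fence(struct drm_device *dev, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	priv->completed_fence = max(fence, priv->completed_fence);

	while (!list_empty(&priv->fence_cbs)) {
		struct msm_fence_cb *cb = list_first_entry(&priv->fence_cbs,
				struct msm_fence_cb, work.entry);

		/* entries are in fence order; stop at the first callback
		 * whose fence has not passed yet */
		if (cb->fence > priv->completed_fence)
			break;

		list_del_init(&cb->work.entry);
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);
}
```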