about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/msm/msm_gem.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  40
1 file changed, 16 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4b1b82adabde..4a6f0e49d5b5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -309,6 +309,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 	return ret;
 }
 
+/* get iova, taking a reference. Should have a matching put */
 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -328,6 +329,16 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
 	return ret;
 }
 
+/* get iova without taking a reference, used in places where you have
+ * already done a 'msm_gem_get_iova()'.
+ */
+uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	WARN_ON(!msm_obj->domain[id].iova);
+	return msm_obj->domain[id].iova;
+}
+
 void msm_gem_put_iova(struct drm_gem_object *obj, int id)
 {
 	// XXX TODO ..
@@ -397,23 +408,10 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
 int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
 		struct msm_fence_cb *cb)
 {
-	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int ret = 0;
-
-	mutex_lock(&dev->struct_mutex);
-	if (!list_empty(&cb->work.entry)) {
-		ret = -EINVAL;
-	} else if (is_active(msm_obj)) {
-		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
-		list_add_tail(&cb->work.entry, &priv->fence_cbs);
-	} else {
-		queue_work(priv->wq, &cb->work);
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	uint32_t fence = msm_gem_fence(msm_obj,
+			MSM_PREP_READ | MSM_PREP_WRITE);
+	return msm_queue_fence_cb(obj->dev, cb, fence);
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -452,12 +450,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
 	int ret = 0;
 
 	if (is_active(msm_obj)) {
-		uint32_t fence = 0;
+		uint32_t fence = msm_gem_fence(msm_obj, op);
 
-		if (op & MSM_PREP_READ)
-			fence = msm_obj->write_fence;
-		if (op & MSM_PREP_WRITE)
-			fence = max(fence, msm_obj->read_fence);
 		if (op & MSM_PREP_NOSYNC)
 			timeout = NULL;
 
@@ -525,13 +519,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
 		struct msm_mmu *mmu = priv->mmus[id];
 		if (mmu && msm_obj->domain[id].iova) {
-			uint32_t offset = (uint32_t)mmap_offset(obj);
+			uint32_t offset = msm_obj->domain[id].iova;
 			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
 		}
 	}
 
-	drm_gem_free_mmap_offset(obj);
-
 	if (obj->import_attach) {
 		if (msm_obj->vaddr)
 			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);