author    Christian König <christian.koenig@amd.com>  2016-05-06 16:14:00 -0400
committer Alex Deucher <alexander.deucher@amd.com>    2016-05-11 13:30:32 -0400
commit    758ac17f963f3497aae4e767d3a9eb68fea71f71 (patch)
tree      e2007d7fa5077f91d7b6b5a05437fc0c1faef41d /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
parent    d88bf583bd06eecb31f82871c90ef6a5a09b5766 (diff)
drm/amdgpu: fix and cleanup user fence handling v2
We leaked the BO in the error path, and in addition we only have one
user fence for all IBs in a job.

v2: remove whitespace changes

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
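The bug is a classic reference-count leak on an error path: a reference is acquired up front, and one failure branch returns without releasing it. Below is a minimal self-contained sketch of the pattern and its fix, using hypothetical toy types rather than the actual kernel structures:

/* toy_refleak.c -- illustrates the error-path leak this commit fixes.
 * The types and functions are hypothetical stand-ins, not amdgpu code.
 * Build with: cc -o toy_refleak toy_refleak.c */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
};

static struct obj *obj_ref(struct obj *o)
{
	o->refcount++;
	return o;
}

static void obj_unref(struct obj **o)
{
	if (--(*o)->refcount == 0)
		free(*o);
	*o = NULL;
}

/* Buggy: when validation fails, the reference taken by obj_ref()
 * is never dropped, so the object's count stays elevated forever. */
static int setup_buggy(struct obj *o, int valid, struct obj **out)
{
	*out = obj_ref(o);
	if (!valid)
		return -1;	/* leak: *out still holds a reference */
	return 0;
}

/* Fixed, mirroring the commit: release the reference on the error
 * path before returning. */
static int setup_fixed(struct obj *o, int valid, struct obj **out)
{
	*out = obj_ref(o);
	if (!valid) {
		obj_unref(out);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	struct obj *ref = NULL;

	o->refcount = 1;
	setup_buggy(o, 0, &ref);
	printf("after buggy setup: refcount = %d (leaked)\n", o->refcount);

	obj_unref(&ref);	/* manually repair for the demo */
	setup_fixed(o, 0, &ref);
	printf("after fixed setup: refcount = %d\n", o->refcount);

	obj_unref(&o);
	return 0;
}

In the patch below the same fix appears as the amdgpu_bo_unref() call added to the error branch of amdgpu_cs_user_fence_chunk(); the one-fence-per-job half of the change is the uf_bo/uf_offset/uf_sequence job fields that replace the per-IB user fence state.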
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  |  55
 1 file changed, 25 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9ab2f0886a14..2bbeeb07c187 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -87,33 +87,30 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 }
 
 static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
-				      struct amdgpu_user_fence *uf,
-				      struct drm_amdgpu_cs_chunk_fence *fence_data)
+				      struct drm_amdgpu_cs_chunk_fence *data,
+				      uint32_t *offset)
 {
 	struct drm_gem_object *gobj;
-	uint32_t handle;
 
-	handle = fence_data->handle;
 	gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
-				     fence_data->handle);
+				     data->handle);
 	if (gobj == NULL)
 		return -EINVAL;
 
-	uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
-	uf->offset = fence_data->offset;
-
-	if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) {
-		drm_gem_object_unreference_unlocked(gobj);
-		return -EINVAL;
-	}
-
-	p->uf_entry.robj = amdgpu_bo_ref(uf->bo);
+	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 	p->uf_entry.priority = 0;
 	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
 	p->uf_entry.tv.shared = true;
 	p->uf_entry.user_pages = NULL;
+	*offset = data->offset;
 
 	drm_gem_object_unreference_unlocked(gobj);
+
+	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
+		amdgpu_bo_unref(&p->uf_entry.robj);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
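The rewrite above is worth annotating: the temporary GEM reference from drm_gem_object_lookup() is now dropped unconditionally before the userptr check, so exactly one long-lived reference (p->uf_entry.robj) remains for the error path to release. Condensed from the hunk, with explanatory comments added (the comments are mine, not from the patch):

gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, data->handle);
if (gobj == NULL)
	return -EINVAL;				/* nothing acquired yet */

p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
/* ... uf_entry bookkeeping elided ... */
drm_gem_object_unreference_unlocked(gobj);	/* drop the lookup reference */

if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
	/* apparently rejecting userptr BOs as fence backing; drop the
	 * one remaining reference instead of leaking it */
	amdgpu_bo_unref(&p->uf_entry.robj);
	return -EINVAL;
}
return 0;

The old version dropped only the GEM reference in this branch and leaked the BO reference taken by amdgpu_bo_ref().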
@@ -124,8 +121,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array;
-	struct amdgpu_user_fence uf = {};
 	unsigned size, num_ibs = 0;
+	uint32_t uf_offset = 0;
 	int i;
 	int ret;
 
@@ -200,7 +197,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 			goto free_partial_kdata;
 		}
 
-		ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata);
+		ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
+						 &uf_offset);
 		if (ret)
 			goto free_partial_kdata;
 
@@ -219,7 +217,10 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	if (ret)
 		goto free_all_kdata;
 
-	p->job->uf = uf;
+	if (p->uf_entry.robj) {
+		p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
+		p->job->uf_offset = uf_offset;
+	}
 
 	kfree(chunk_array);
 	return 0;
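With the stack-local struct amdgpu_user_fence gone, the parser-init hunks above hand the user fence over as a plain BO pointer plus offset on the job, and the job takes its own reference so its lifetime no longer depends on the parser's. Condensed from the hunk, comment added (presumably the parser's own reference in p->uf_entry.robj is released during parser teardown, which is outside this diff):

if (p->uf_entry.robj) {
	/* the job pins the fence BO independently of the parser */
	p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
	p->job->uf_offset = uf_offset;
}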
@@ -377,7 +378,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	INIT_LIST_HEAD(&duplicates);
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 
-	if (p->job->uf.bo)
+	if (p->uf_entry.robj)
 		list_add(&p->uf_entry.tv.head, &p->validated);
 
 	if (need_mmap_lock)
@@ -760,17 +761,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 		j++;
 	}
 
-	/* wrap the last IB with user fence */
-	if (parser->job->uf.bo) {
-		struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
-
-		/* UVD & VCE fw doesn't support user fences */
-		if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
-		    parser->job->ring->type == AMDGPU_RING_TYPE_VCE)
-			return -EINVAL;
-
-		ib->user = &parser->job->uf;
-	}
+	/* UVD & VCE fw doesn't support user fences */
+	if (parser->job->uf_bo && (
+	    parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
+	    parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+		return -EINVAL;
 
 	return 0;
 }
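Because the user fence is now tracked per job rather than attached to the last IB (the ib->user assignment is gone entirely), the UVD/VCE firmware limitation can be checked up front instead of inside a wrap-the-last-IB block:

/* UVD & VCE fw doesn't support user fences */
if (parser->job->uf_bo && (
    parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
    parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
	return -EINVAL;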
@@ -856,7 +851,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job->ctx = entity->fence_context;
 	p->fence = fence_get(fence);
 	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
-	job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
+	job->uf_sequence = cs->out.handle;
 
 	trace_amdgpu_cs_ioctl(job);
 	amd_sched_entity_push_job(&job->base);
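The final hunk completes the one-fence-per-job model: the submission handle is no longer stashed on the last IB but lives directly on the job, so it no longer matters how many IBs the job contains:

/* before: per-IB sequence on the job's last IB */
job->ibs[job->num_ibs - 1].sequence = cs->out.handle;

/* after: a single per-job sequence */
job->uf_sequence = cs->out.handle;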