about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2016-02-01 05:20:37 -0500
committerAlex Deucher <alexander.deucher@amd.com>2016-02-10 14:17:19 -0500
commit4c0b242cf27094b666df6604420417e201260af9 (patch)
tree1004e7c6f70d3ad63632d419c4f3f2996208675c /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
parent50838c8cc413de8da39c4c216ae05410845d5a44 (diff)
drm/amdgpu: cleanup user fence handling in the CS
Don't keep that around twice.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c29
1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e9d88771783b..8f3b72f5c91c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -87,6 +87,7 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
87} 87}
88 88
89static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, 89static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
90 struct amdgpu_user_fence *uf,
90 struct drm_amdgpu_cs_chunk_fence *fence_data) 91 struct drm_amdgpu_cs_chunk_fence *fence_data)
91{ 92{
92 struct drm_gem_object *gobj; 93 struct drm_gem_object *gobj;
@@ -98,15 +99,15 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
98 if (gobj == NULL) 99 if (gobj == NULL)
99 return -EINVAL; 100 return -EINVAL;
100 101
101 p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); 102 uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
102 p->uf.offset = fence_data->offset; 103 uf->offset = fence_data->offset;
103 104
104 if (amdgpu_ttm_tt_get_usermm(p->uf.bo->tbo.ttm)) { 105 if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) {
105 drm_gem_object_unreference_unlocked(gobj); 106 drm_gem_object_unreference_unlocked(gobj);
106 return -EINVAL; 107 return -EINVAL;
107 } 108 }
108 109
109 p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo); 110 p->uf_entry.robj = amdgpu_bo_ref(uf->bo);
110 p->uf_entry.priority = 0; 111 p->uf_entry.priority = 0;
111 p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; 112 p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
112 p->uf_entry.tv.shared = true; 113 p->uf_entry.tv.shared = true;
@@ -117,10 +118,11 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
117 118
118int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) 119int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
119{ 120{
121 struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
120 union drm_amdgpu_cs *cs = data; 122 union drm_amdgpu_cs *cs = data;
121 uint64_t *chunk_array_user; 123 uint64_t *chunk_array_user;
122 uint64_t *chunk_array; 124 uint64_t *chunk_array;
123 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 125 struct amdgpu_user_fence uf = {};
124 unsigned size, num_ibs = 0; 126 unsigned size, num_ibs = 0;
125 int i; 127 int i;
126 int ret; 128 int ret;
@@ -196,7 +198,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
196 goto free_partial_kdata; 198 goto free_partial_kdata;
197 } 199 }
198 200
199 ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata); 201 ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata);
200 if (ret) 202 if (ret)
201 goto free_partial_kdata; 203 goto free_partial_kdata;
202 204
@@ -215,6 +217,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
215 if (ret) 217 if (ret)
216 goto free_all_kdata; 218 goto free_all_kdata;
217 219
220 p->job->uf = uf;
221
218 kfree(chunk_array); 222 kfree(chunk_array);
219 return 0; 223 return 0;
220 224
@@ -353,7 +357,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
353 INIT_LIST_HEAD(&duplicates); 357 INIT_LIST_HEAD(&duplicates);
354 amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); 358 amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
355 359
356 if (p->uf.bo) 360 if (p->job->uf.bo)
357 list_add(&p->uf_entry.tv.head, &p->validated); 361 list_add(&p->uf_entry.tv.head, &p->validated);
358 362
359 if (need_mmap_lock) 363 if (need_mmap_lock)
@@ -472,7 +476,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
472 kfree(parser->chunks); 476 kfree(parser->chunks);
473 if (parser->job) 477 if (parser->job)
474 amdgpu_job_free(parser->job); 478 amdgpu_job_free(parser->job);
475 amdgpu_bo_unref(&parser->uf.bo);
476 amdgpu_bo_unref(&parser->uf_entry.robj); 479 amdgpu_bo_unref(&parser->uf_entry.robj);
477} 480}
478 481
@@ -673,7 +676,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
673 } 676 }
674 } 677 }
675 /* wrap the last IB with user fence */ 678 /* wrap the last IB with user fence */
676 if (parser->uf.bo) { 679 if (parser->job->uf.bo) {
677 struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1]; 680 struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
678 681
679 /* UVD & VCE fw doesn't support user fences */ 682 /* UVD & VCE fw doesn't support user fences */
@@ -681,7 +684,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
681 ib->ring->type == AMDGPU_RING_TYPE_VCE) 684 ib->ring->type == AMDGPU_RING_TYPE_VCE)
682 return -EINVAL; 685 return -EINVAL;
683 686
684 ib->user = &parser->uf; 687 ib->user = &parser->job->uf;
685 } 688 }
686 689
687 return 0; 690 return 0;
@@ -767,12 +770,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
767 job->owner = p->filp; 770 job->owner = p->filp;
768 job->free_job = amdgpu_cs_free_job; 771 job->free_job = amdgpu_cs_free_job;
769 772
770 if (job->ibs[job->num_ibs - 1].user) {
771 job->uf = p->uf;
772 job->ibs[job->num_ibs - 1].user = &job->uf;
773 p->uf.bo = NULL;
774 }
775
776 fence = amd_sched_fence_create(job->base.s_entity, p->filp); 773 fence = amd_sched_fence_create(job->base.s_entity, p->filp);
777 if (!fence) { 774 if (!fence) {
778 amdgpu_cs_free_job(job); 775 amdgpu_cs_free_job(job);