Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 15e341634536..ee121ec2917b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,15 +35,24 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 	ctx->adev = adev;
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		ctx->rings[i].sequence = 1;
+	ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
+			      AMDGPU_MAX_RINGS, GFP_KERNEL);
+	if (!ctx->fences)
+		return -ENOMEM;
 
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		ctx->rings[i].sequence = 1;
+		ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
+			amdgpu_sched_jobs * i;
+	}
 	if (amdgpu_enable_scheduler) {
 		/* create context entity for each ring */
 		for (i = 0; i < adev->num_rings; i++) {
 			struct amd_sched_rq *rq;
-			if (pri >= AMD_SCHED_MAX_PRIORITY)
+			if (pri >= AMD_SCHED_MAX_PRIORITY) {
+				kfree(ctx->fences);
 				return -EINVAL;
+			}
 			rq = &adev->rings[i]->sched.sched_rq[pri];
 			r = amd_sched_entity_init(&adev->rings[i]->sched,
 						  &ctx->rings[i].entity,
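
Note: the hunk above replaces the fixed-size per-ring fence array with one flat kzalloc'd block, carved into AMDGPU_MAX_RINGS windows of amdgpu_sched_jobs pointers each. A minimal userspace sketch of that pattern follows; the names (MAX_RINGS, ring_slice, alloc_fence_slots) are toy stand-ins for the driver's types, not driver API:

#include <stdlib.h>

#define MAX_RINGS 16                  /* stands in for AMDGPU_MAX_RINGS */

struct fence;                         /* opaque, as in the driver */

struct ring_slice {                   /* toy stand-in for the per-ring state */
	unsigned long sequence;
	struct fence **fences;        /* window into the shared flat array */
};

static struct fence **alloc_fence_slots(struct ring_slice *rings,
					unsigned sched_jobs)
{
	/* one allocation covers every ring: MAX_RINGS * sched_jobs slots */
	struct fence **flat = calloc((size_t)MAX_RINGS * sched_jobs,
				     sizeof(*flat));
	if (!flat)
		return NULL;

	for (unsigned i = 0; i < MAX_RINGS; ++i) {
		rings[i].sequence = 1;
		/* ring i owns slots [i * sched_jobs, (i + 1) * sched_jobs) */
		rings[i].fences = flat + (size_t)i * sched_jobs;
	}
	return flat;
}

The driver computes the same offsets via (void *) byte arithmetic; pointer arithmetic on struct fence ** is equivalent and avoids the cast.
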
@@ -56,6 +65,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 			for (j = 0; j < i; j++)
 				amd_sched_entity_fini(&adev->rings[j]->sched,
 						      &ctx->rings[j].entity);
+			kfree(ctx->fences);
 			return r;
 		}
 	}
@@ -71,8 +81,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 		return;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+		for (j = 0; j < amdgpu_sched_jobs; ++j)
 			fence_put(ctx->rings[i].fences[j]);
+	kfree(ctx->fences);
 
 	if (amdgpu_enable_scheduler) {
 		for (i = 0; i < adev->num_rings; i++)
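
Note: teardown is the mirror image of init: every reference still parked in the window is dropped before the single flat allocation is freed. A sketch under the same toy types (free_fence_slots and the fence_put stub are stand-ins; the kernel's fence_put accepts NULL, which the sketch makes explicit):

#include <stdlib.h>

#define MAX_RINGS 16               /* stands in for AMDGPU_MAX_RINGS */

struct fence;
void fence_put(struct fence *f);   /* stand-in for the kernel refcount helper */

struct ring_slice {
	unsigned long sequence;
	struct fence **fences;
};

static void free_fence_slots(struct ring_slice *rings, unsigned sched_jobs,
			     struct fence **flat)
{
	/* drop every reference still held in each ring's window */
	for (unsigned i = 0; i < MAX_RINGS; ++i)
		for (unsigned j = 0; j < sched_jobs; ++j)
			if (rings[i].fences[j])
				fence_put(rings[i].fences[j]);

	/* the per-ring pointers alias into the one flat allocation,
	 * so only 'flat' itself is freed */
	free(flat);
}
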
@@ -241,7 +252,7 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 	unsigned idx = 0;
 	struct fence *other = NULL;
 
-	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	idx = seq % amdgpu_sched_jobs;
 	other = cring->fences[idx];
 	if (other) {
 		signed long r;
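
Note: with amdgpu_sched_jobs slots per ring, seq % amdgpu_sched_jobs turns the fence array into a ring buffer. The slot being claimed may still hold the fence from amdgpu_sched_jobs submissions ago, which the driver first waits on and releases. A self-contained sketch of that reuse logic (add_fence and the stub fence helpers are toy stand-ins, not driver API; the real code also takes the ring lock):

struct fence;                      /* opaque, as in the driver */
void fence_get(struct fence *f);   /* stand-ins for the kernel's fence  */
void fence_put(struct fence *f);   /* refcount helpers                  */

struct ring_slice {
	unsigned long sequence;    /* next seq to hand out, starts at 1 */
	struct fence **fences;     /* sched_jobs slots */
};

static unsigned long add_fence(struct ring_slice *ring, unsigned sched_jobs,
			       struct fence *fence)
{
	unsigned long seq = ring->sequence;
	unsigned idx = seq % sched_jobs;
	struct fence *other = ring->fences[idx];

	/* the slot may still hold the fence from sched_jobs submissions
	 * ago; the driver waits for it to signal before dropping it */
	if (other)
		fence_put(other);

	fence_get(fence);
	ring->fences[idx] = fence;
	ring->sequence = seq + 1;
	return seq;                /* handle handed back to userspace */
}
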
@@ -276,12 +287,12 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	}
 
 
-	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+	if (seq + amdgpu_sched_jobs < cring->sequence) {
 		spin_unlock(&ctx->ring_lock);
 		return NULL;
 	}
 
-	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+	fence = fence_get(cring->fences[seq % amdgpu_sched_jobs]);
 	spin_unlock(&ctx->ring_lock);
 
 	return fence;
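
Note: the lookup side enforces the same window. Only the most recent amdgpu_sched_jobs fences per ring survive; a sequence number older than that was overwritten by the modulo reuse above, so NULL is the honest answer for it. A sketch of the two bounds checks (lookup_fence is a toy name; the real function also holds the ring lock and distinguishes the not-yet-submitted case with an error pointer):

struct fence;                      /* opaque, as in the driver */

struct ring_slice {
	unsigned long sequence;    /* next seq to hand out */
	struct fence **fences;     /* sched_jobs slots */
};

static struct fence *lookup_fence(struct ring_slice *ring,
				  unsigned sched_jobs, unsigned long seq)
{
	if (seq >= ring->sequence)
		return NULL;       /* not submitted yet */
	if (seq + sched_jobs < ring->sequence)
		return NULL;       /* slot already recycled by the modulo */
	return ring->fences[seq % sched_jobs]; /* caller takes its own ref */
}
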