author		Chunming Zhou <David1.Zhou@amd.com>	2015-12-10 02:45:11 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2015-12-14 19:45:24 -0500
commit		37cd0ca204a55e123fca9ce411e6571ac49fa8f7 (patch)
tree		70f04d3818854a2c818b97c467c932c8bc5f56c4
parent		c648ed7c5c7f0e3bb4ab11bf08bccf99b42a4cbb (diff)
drm/amdgpu: unify AMDGPU_CTX_MAX_CS_PENDING and amdgpu_sched_jobs
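The per-context fence ring in struct amdgpu_ctx_ring was sized by the
hard-coded AMDGPU_CTX_MAX_CS_PENDING (16), while the scheduler's job depth
is configured separately through the amdgpu_sched_jobs option. Both describe
the same window of in-flight submissions, so drop the define and size the
fence storage from amdgpu_sched_jobs instead: allocate one flat array per
context and hand each ring a slice of it.

Illustrative layout only, not part of the patch (N stands for
amdgpu_sched_jobs):

	/*
	 * ctx->fences: [ ring 0: N slots | ring 1: N slots | ... ]
	 * Ring i's slice starts at index N * i, and submission seq
	 * lands in slot seq % N of that slice.
	 */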
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	5
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c	25
2 files changed, 20 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3b5d3706f0cb..c3996e0e2e7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1023,11 +1023,9 @@ int amdgpu_vm_free_job(struct amdgpu_job *job);
  * context related structures
  */

-#define AMDGPU_CTX_MAX_CS_PENDING	16
-
 struct amdgpu_ctx_ring {
 	uint64_t		sequence;
-	struct fence		*fences[AMDGPU_CTX_MAX_CS_PENDING];
+	struct fence		**fences;
 	struct amd_sched_entity	entity;
 };

@@ -1036,6 +1034,7 @@ struct amdgpu_ctx {
 	struct amdgpu_device	*adev;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
+	struct fence		**fences;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
 };

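For reference (not part of the patch): since ctx->fences is a
struct fence **, the (void *) pointer arithmetic used in amdgpu_ctx_init
below is equivalent to plain indexing into the flat allocation. A minimal
sketch of the same per-ring slicing:

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		/* slice i covers indices [i * amdgpu_sched_jobs,
		 * (i + 1) * amdgpu_sched_jobs) of ctx->fences */
		ctx->rings[i].fences = &ctx->fences[i * amdgpu_sched_jobs];
	}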
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 15e341634536..ee121ec2917b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -35,15 +35,24 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 	ctx->adev = adev;
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		ctx->rings[i].sequence = 1;
+	ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
+			      AMDGPU_MAX_RINGS, GFP_KERNEL);
+	if (!ctx->fences)
+		return -ENOMEM;

+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		ctx->rings[i].sequence = 1;
+		ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
+			amdgpu_sched_jobs * i;
+	}
 	if (amdgpu_enable_scheduler) {
 		/* create context entity for each ring */
 		for (i = 0; i < adev->num_rings; i++) {
 			struct amd_sched_rq *rq;
-			if (pri >= AMD_SCHED_MAX_PRIORITY)
+			if (pri >= AMD_SCHED_MAX_PRIORITY) {
+				kfree(ctx->fences);
 				return -EINVAL;
+			}
 			rq = &adev->rings[i]->sched.sched_rq[pri];
 			r = amd_sched_entity_init(&adev->rings[i]->sched,
 						  &ctx->rings[i].entity,
@@ -56,6 +65,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 			for (j = 0; j < i; j++)
 				amd_sched_entity_fini(&adev->rings[j]->sched,
 						      &ctx->rings[j].entity);
+			kfree(ctx->fences);
 			return r;
 		}
 	}
@@ -71,8 +81,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 		return;

 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+		for (j = 0; j < amdgpu_sched_jobs; ++j)
 			fence_put(ctx->rings[i].fences[j]);
+	kfree(ctx->fences);

 	if (amdgpu_enable_scheduler) {
 		for (i = 0; i < adev->num_rings; i++)
@@ -241,7 +252,7 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 	unsigned idx = 0;
 	struct fence *other = NULL;

-	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	idx = seq % amdgpu_sched_jobs;
 	other = cring->fences[idx];
 	if (other) {
 		signed long r;
@@ -276,12 +287,12 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	}


-	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+	if (seq + amdgpu_sched_jobs < cring->sequence) {
 		spin_unlock(&ctx->ring_lock);
 		return NULL;
 	}

-	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+	fence = fence_get(cring->fences[seq % amdgpu_sched_jobs]);
 	spin_unlock(&ctx->ring_lock);

 	return fence;
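A closing note on the lookup above: the fence slots form a ring buffer of
amdgpu_sched_jobs entries per ring, so once more than amdgpu_sched_jobs
newer submissions exist, the slot for seq has been recycled and NULL is
returned. A standalone sketch of that logic, with ctx_fence_lookup as a
hypothetical helper; the caller is assumed to hold ring_lock, and the
upper-bound check mirrors the one earlier in amdgpu_ctx_get_fence
(outside this hunk):

	static struct fence *ctx_fence_lookup(struct amdgpu_ctx_ring *cring,
					      uint64_t seq)
	{
		/* seq was never handed out: caller error */
		if (seq >= cring->sequence)
			return ERR_PTR(-EINVAL);

		/* slot already overwritten by a newer submission */
		if (seq + amdgpu_sched_jobs < cring->sequence)
			return NULL;

		/* take a reference; fence_get() tolerates a NULL slot */
		return fence_get(cring->fences[seq % amdgpu_sched_jobs]);
	}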