author     Chunming Zhou <david1.zhou@amd.com>        2015-07-21 01:17:19 -0400
committer  Alex Deucher <alexander.deucher@amd.com>   2015-08-17 16:50:32 -0400
commit     9cb7e5a91f6cd4dc018cca7120d2da067f816d3a (patch)
tree       6d221842a748e103ad700841d94f7064a65c87d0
parent     b80d8475c1fdf5f4bcabb65168b2e8a9c3d77731 (diff)
drm/amdgpu: add context entity init
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h      |  2 ++
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  | 36 +++++++++++++++++++++++++++++-
2 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 815d40f5e6e1..776339c2a95e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -994,10 +994,12 @@ struct amdgpu_vm_manager {
 struct amdgpu_ctx_ring {
 	uint64_t		sequence;
 	struct fence		*fences[AMDGPU_CTX_MAX_CS_PENDING];
+	struct amd_context_entity c_entity;
 };
 
 struct amdgpu_ctx {
 	struct kref		refcount;
+	struct amdgpu_device	*adev;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
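For orientation: because c_entity is embedded in each per-ring slot and the context now carries an adev back-pointer, the release and allocation paths below can reach a ring's scheduler entity straight from the context. A minimal illustrative sketch of that access pattern (the helper name amdgpu_ctx_entity is hypothetical, not part of this commit):

/* Hypothetical helper, not in this patch: with c_entity embedded in
 * struct amdgpu_ctx_ring, the entity for ring "ring_idx" needs no
 * separate lookup -- it lives inside the context itself. */
static struct amd_context_entity *
amdgpu_ctx_entity(struct amdgpu_ctx *ctx, unsigned ring_idx)
{
	return &ctx->rings[ring_idx].c_entity;
}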
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 144edc97c6fe..557fb60f416b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -28,13 +28,23 @@
 static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
+	struct amdgpu_device *adev;
 	unsigned i, j;
 
 	ctx = container_of(ref, struct amdgpu_ctx, refcount);
+	adev = ctx->adev;
+
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
 			fence_put(ctx->rings[i].fences[j]);
+
+	if (amdgpu_enable_scheduler) {
+		for (i = 0; i < adev->num_rings; i++)
+			amd_context_entity_fini(adev->rings[i]->scheduler,
+						&ctx->rings[i].c_entity);
+	}
+
 	kfree(ctx);
 }
 
@@ -43,7 +53,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 {
 	struct amdgpu_ctx *ctx;
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-	int i, r;
+	int i, j, r;
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -59,11 +69,35 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 	*id = (uint32_t)r;
 
 	memset(ctx, 0, sizeof(*ctx));
+	ctx->adev = adev;
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		ctx->rings[i].sequence = 1;
 	mutex_unlock(&mgr->lock);
+	if (amdgpu_enable_scheduler) {
+		/* create context entity for each ring */
+		for (i = 0; i < adev->num_rings; i++) {
+			struct amd_run_queue *rq;
+			if (fpriv)
+				rq = &adev->rings[i]->scheduler->sched_rq;
+			else
+				rq = &adev->rings[i]->scheduler->kernel_rq;
+			r = amd_context_entity_init(adev->rings[i]->scheduler,
+						    &ctx->rings[i].c_entity,
+						    NULL, rq, *id);
+			if (r)
+				break;
+		}
+
+		if (i < adev->num_rings) {
+			for (j = 0; j < i; j++)
+				amd_context_entity_fini(adev->rings[j]->scheduler,
+							&ctx->rings[j].c_entity);
+			kfree(ctx);
+			return -EINVAL;
+		}
+	}
 
 	return 0;
 }
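The error handling in amdgpu_ctx_alloc() above is the usual partial-initialization unwind: entities are initialized ring by ring, and if ring i fails, only the i entities that already succeeded are torn down before the context is freed. A self-contained userspace sketch of the same idiom, using hypothetical stand-ins (entity_init, entity_fini, NUM_RINGS) rather than the kernel API:

#include <stdio.h>

#define NUM_RINGS 4

/* Pretend the third entity fails, to exercise the unwind path. */
static int entity_init(int i)
{
	return (i == 2) ? -1 : 0;
}

static void entity_fini(int i)
{
	printf("fini entity %d\n", i);
}

int main(void)
{
	int i, j, r;

	for (i = 0; i < NUM_RINGS; i++) {
		r = entity_init(i);
		if (r)
			break;	/* leave i pointing at the first failed slot */
	}

	if (i < NUM_RINGS) {
		/* unwind only entities 0..i-1, which were fully set up */
		for (j = 0; j < i; j++)
			entity_fini(j);
		return 1;
	}

	return 0;
}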