Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 145 ++++++++++++++++++-------------
 1 file changed, 68 insertions(+), 77 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index c2290ae20312..08a9292729dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,82 +25,27 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static void amdgpu_ctx_do_release(struct kref *ref)
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+		    struct amdgpu_ctx *ctx)
 {
-	struct amdgpu_ctx *ctx;
-	struct amdgpu_device *adev;
 	unsigned i, j;
+	int r;
 
-	ctx = container_of(ref, struct amdgpu_ctx, refcount);
-	adev = ctx->adev;
-
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
-			fence_put(ctx->rings[i].fences[j]);
-
-	if (amdgpu_enable_scheduler) {
-		for (i = 0; i < adev->num_rings; i++)
-			amd_context_entity_fini(adev->rings[i]->scheduler,
-						&ctx->rings[i].c_entity);
-	}
-
-	kfree(ctx);
-}
-
-static void amdgpu_ctx_init(struct amdgpu_device *adev,
-			    struct amdgpu_fpriv *fpriv,
-			    struct amdgpu_ctx *ctx)
-{
-	int i;
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->adev = adev;
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		ctx->rings[i].sequence = 1;
-}
-
-int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
-		     uint32_t *id)
-{
-	struct amdgpu_ctx *ctx;
-	int i, j, r;
-
-	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
-	if (fpriv) {
-		struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-		mutex_lock(&mgr->lock);
-		r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
-		if (r < 0) {
-			mutex_unlock(&mgr->lock);
-			kfree(ctx);
-			return r;
-		}
-		*id = (uint32_t)r;
-		amdgpu_ctx_init(adev, fpriv, ctx);
-		mutex_unlock(&mgr->lock);
-	} else {
-		if (adev->kernel_ctx) {
-			DRM_ERROR("kernel cnotext has been created.\n");
-			kfree(ctx);
-			return 0;
-		}
-		amdgpu_ctx_init(adev, fpriv, ctx);
-
-		adev->kernel_ctx = ctx;
-	}
 
 	if (amdgpu_enable_scheduler) {
 		/* create context entity for each ring */
 		for (i = 0; i < adev->num_rings; i++) {
 			struct amd_run_queue *rq;
-			if (fpriv)
-				rq = &adev->rings[i]->scheduler->sched_rq;
-			else
+			if (kernel)
 				rq = &adev->rings[i]->scheduler->kernel_rq;
+			else
+				rq = &adev->rings[i]->scheduler->sched_rq;
 			r = amd_context_entity_init(adev->rings[i]->scheduler,
 						    &ctx->rings[i].c_entity,
 						    NULL, rq, amdgpu_sched_jobs);
@@ -113,33 +58,79 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 			amd_context_entity_fini(adev->rings[j]->scheduler,
 						&ctx->rings[j].c_entity);
 			kfree(ctx);
-			return -EINVAL;
+			return r;
 		}
 	}
-
 	return 0;
 }
 
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 {
+	struct amdgpu_device *adev = ctx->adev;
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+			fence_put(ctx->rings[i].fences[j]);
+
+	if (amdgpu_enable_scheduler) {
+		for (i = 0; i < adev->num_rings; i++)
+			amd_context_entity_fini(adev->rings[i]->scheduler,
+						&ctx->rings[i].c_entity);
+	}
+}
+
+static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
+			    struct amdgpu_fpriv *fpriv,
+			    uint32_t *id)
+{
+	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
 	struct amdgpu_ctx *ctx;
+	int r;
 
-	if (fpriv) {
-		struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-		mutex_lock(&mgr->lock);
-		ctx = idr_find(&mgr->ctx_handles, id);
-		if (ctx) {
-			idr_remove(&mgr->ctx_handles, id);
-			kref_put(&ctx->refcount, amdgpu_ctx_do_release);
-			mutex_unlock(&mgr->lock);
-			return 0;
-		}
+	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	mutex_lock(&mgr->lock);
+	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+	if (r < 0) {
 		mutex_unlock(&mgr->lock);
-	} else {
-		ctx = adev->kernel_ctx;
+		kfree(ctx);
+		return r;
+	}
+	*id = (uint32_t)r;
+	r = amdgpu_ctx_init(adev, false, ctx);
+	mutex_unlock(&mgr->lock);
+
+	return r;
+}
+
+static void amdgpu_ctx_do_release(struct kref *ref)
+{
+	struct amdgpu_ctx *ctx;
+
+	ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
+	amdgpu_ctx_fini(ctx);
+
+	kfree(ctx);
+}
+
+static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
+{
+	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+	struct amdgpu_ctx *ctx;
+
+	mutex_lock(&mgr->lock);
+	ctx = idr_find(&mgr->ctx_handles, id);
+	if (ctx) {
+		idr_remove(&mgr->ctx_handles, id);
 		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
+		mutex_unlock(&mgr->lock);
 		return 0;
 	}
+	mutex_unlock(&mgr->lock);
 	return -EINVAL;
 }
 
@@ -198,7 +189,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		args->out.alloc.ctx_id = id;
 		break;
 	case AMDGPU_CTX_OP_FREE_CTX:
-		r = amdgpu_ctx_free(adev, fpriv, id);
+		r = amdgpu_ctx_free(fpriv, id);
 		break;
 	case AMDGPU_CTX_OP_QUERY_STATE:
 		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
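
Reviewer note: with this change amdgpu_ctx_alloc()/amdgpu_ctx_free() become static and
handle only userspace contexts tracked in fpriv->ctx_mgr, while setup and teardown are
made non-static as amdgpu_ctx_init()/amdgpu_ctx_fini(), so the single kernel context can
be built in caller-provided storage instead of being special-cased here. A minimal sketch
of a kernel-side caller under that assumption; the real call site is outside this file
and not part of this diff, and the example_* names are hypothetical:

	/* hypothetical storage owned by the caller (e.g. embedded in the device) */
	static struct amdgpu_ctx example_kernel_ctx;

	static int example_create_kernel_ctx(struct amdgpu_device *adev)
	{
		/* kernel == true selects each ring scheduler's kernel_rq */
		return amdgpu_ctx_init(adev, true, &example_kernel_ctx);
	}

	static void example_destroy_kernel_ctx(void)
	{
		/* puts any pending CS fences and finis the per-ring entities */
		amdgpu_ctx_fini(&example_kernel_ctx);
	}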