Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')

 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 223 ++++++++++++++++++++++++-------
 1 file changed, 173 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6c66ac8a1891..08bc7722ddb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -25,54 +25,107 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static void amdgpu_ctx_do_release(struct kref *ref)
+int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+                    struct amdgpu_ctx *ctx)
 {
-        struct amdgpu_ctx *ctx;
-        struct amdgpu_ctx_mgr *mgr;
+        unsigned i, j;
+        int r;
 
-        ctx = container_of(ref, struct amdgpu_ctx, refcount);
-        mgr = &ctx->fpriv->ctx_mgr;
+        memset(ctx, 0, sizeof(*ctx));
+        ctx->adev = adev;
+        kref_init(&ctx->refcount);
+        spin_lock_init(&ctx->ring_lock);
+        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+                ctx->rings[i].sequence = 1;
 
-        idr_remove(&mgr->ctx_handles, ctx->id);
-        kfree(ctx);
+        if (amdgpu_enable_scheduler) {
+                /* create context entity for each ring */
+                for (i = 0; i < adev->num_rings; i++) {
+                        struct amd_sched_rq *rq;
+                        if (kernel)
+                                rq = &adev->rings[i]->scheduler->kernel_rq;
+                        else
+                                rq = &adev->rings[i]->scheduler->sched_rq;
+                        r = amd_sched_entity_init(adev->rings[i]->scheduler,
+                                                  &ctx->rings[i].entity,
+                                                  rq, amdgpu_sched_jobs);
+                        if (r)
+                                break;
+                }
+
+                if (i < adev->num_rings) {
+                        for (j = 0; j < i; j++)
+                                amd_sched_entity_fini(adev->rings[j]->scheduler,
+                                                      &ctx->rings[j].entity);
+                        kfree(ctx);
+                        return r;
+                }
+        }
+        return 0;
 }
 
-int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t *id, uint32_t flags)
+void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+{
+        struct amdgpu_device *adev = ctx->adev;
+        unsigned i, j;
+
+        for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+                for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+                        fence_put(ctx->rings[i].fences[j]);
+
+        if (amdgpu_enable_scheduler) {
+                for (i = 0; i < adev->num_rings; i++)
+                        amd_sched_entity_fini(adev->rings[i]->scheduler,
+                                              &ctx->rings[i].entity);
+        }
+}
+
+static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
+                            struct amdgpu_fpriv *fpriv,
+                            uint32_t *id)
 {
-        int r;
-        struct amdgpu_ctx *ctx;
         struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+        struct amdgpu_ctx *ctx;
+        int r;
 
         ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
         if (!ctx)
                 return -ENOMEM;
 
         mutex_lock(&mgr->lock);
-        r = idr_alloc(&mgr->ctx_handles, ctx, 0, 0, GFP_KERNEL);
+        r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
         if (r < 0) {
                 mutex_unlock(&mgr->lock);
                 kfree(ctx);
                 return r;
         }
         *id = (uint32_t)r;
-
-        memset(ctx, 0, sizeof(*ctx));
-        ctx->id = *id;
-        ctx->fpriv = fpriv;
-        kref_init(&ctx->refcount);
+        r = amdgpu_ctx_init(adev, false, ctx);
         mutex_unlock(&mgr->lock);
 
-        return 0;
+        return r;
 }
 
-int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, uint32_t id)
+static void amdgpu_ctx_do_release(struct kref *ref)
 {
         struct amdgpu_ctx *ctx;
+
+        ctx = container_of(ref, struct amdgpu_ctx, refcount);
+
+        amdgpu_ctx_fini(ctx);
+
+        kfree(ctx);
+}
+
+static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
+{
         struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+        struct amdgpu_ctx *ctx;
 
         mutex_lock(&mgr->lock);
         ctx = idr_find(&mgr->ctx_handles, id);
         if (ctx) {
+                idr_remove(&mgr->ctx_handles, id);
                 kref_put(&ctx->refcount, amdgpu_ctx_do_release);
                 mutex_unlock(&mgr->lock);
                 return 0;
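Note: the new amdgpu_ctx_init() uses the usual partial-initialization unwind: it initializes one scheduler entity per ring, and if ring i fails it tears down only rings 0..i-1 before returning the error. The standalone sketch below shows the same idiom in plain userspace C; resource_init()/resource_fini() and NUM_RINGS are made-up stand-ins, not kernel or driver API.

#include <stdio.h>

#define NUM_RINGS 4     /* made-up ring count for the sketch */

/* Hypothetical per-ring resource; stands in for one scheduler entity. */
static int resource_init(int idx)
{
        if (idx == 2)           /* pretend ring 2 fails to initialize */
                return -1;
        printf("ring %d: entity initialized\n", idx);
        return 0;
}

static void resource_fini(int idx)
{
        printf("ring %d: entity torn down\n", idx);
}

/* Same shape as the error path in amdgpu_ctx_init(): initialize each
 * ring in order; if the loop stops early, tear down only the rings
 * that were already set up and return the error. */
static int ctx_init(void)
{
        int i, j, r = 0;

        for (i = 0; i < NUM_RINGS; i++) {
                r = resource_init(i);
                if (r)
                        break;
        }

        if (i < NUM_RINGS) {
                for (j = 0; j < i; j++)
                        resource_fini(j);
                return r;
        }
        return 0;
}

int main(void)
{
        return ctx_init() ? 1 : 0;
}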
@@ -86,9 +139,13 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
                             union drm_amdgpu_ctx_out *out)
 {
         struct amdgpu_ctx *ctx;
-        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+        struct amdgpu_ctx_mgr *mgr;
         unsigned reset_counter;
 
+        if (!fpriv)
+                return -EINVAL;
+
+        mgr = &fpriv->ctx_mgr;
         mutex_lock(&mgr->lock);
         ctx = idr_find(&mgr->ctx_handles, id);
         if (!ctx) {
@@ -97,8 +154,8 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
         }
 
         /* TODO: these two are always zero */
-        out->state.flags = ctx->state.flags;
-        out->state.hangs = ctx->state.hangs;
+        out->state.flags = 0x0;
+        out->state.hangs = 0x0;
 
         /* determine if a GPU reset has occured since the last call */
         reset_counter = atomic_read(&adev->gpu_reset_counter);
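Note: the query path snapshots a device-wide reset counter with atomic_read(); the remainder of amdgpu_ctx_query() (outside this hunk) presumably compares that snapshot against a value cached in the context to decide whether a reset happened since the previous query, then stores the new snapshot. A minimal userspace model of that compare-and-update idiom, using C11 atomics and made-up names rather than the kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Device-wide counter, bumped once per GPU reset
 * (stand-in for adev->gpu_reset_counter). */
static atomic_uint gpu_reset_counter;

/* Per-context snapshot taken at the previous query. */
struct ctx {
        unsigned int reset_counter;
};

/* Report whether at least one reset happened since the last query,
 * then re-arm the context for the next call. */
static bool ctx_reset_since_last_query(struct ctx *ctx)
{
        unsigned int now = atomic_load(&gpu_reset_counter);
        bool occurred = (ctx->reset_counter != now);

        ctx->reset_counter = now;
        return occurred;
}

int main(void)
{
        struct ctx c = { .reset_counter = atomic_load(&gpu_reset_counter) };

        atomic_fetch_add(&gpu_reset_counter, 1);        /* simulate a reset */
        printf("reset since last query: %d\n", ctx_reset_since_last_query(&c)); /* 1 */
        printf("reset since last query: %d\n", ctx_reset_since_last_query(&c)); /* 0 */
        return 0;
}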
@@ -113,28 +170,11 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
         return 0;
 }
 
-void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv)
-{
-        struct idr *idp;
-        struct amdgpu_ctx *ctx;
-        uint32_t id;
-        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-        idp = &mgr->ctx_handles;
-
-        idr_for_each_entry(idp,ctx,id) {
-                if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
-                        DRM_ERROR("ctx (id=%ul) is still alive\n",ctx->id);
-        }
-
-        mutex_destroy(&mgr->lock);
-}
-
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *filp)
 {
         int r;
         uint32_t id;
-        uint32_t flags;
 
         union drm_amdgpu_ctx *args = data;
         struct amdgpu_device *adev = dev->dev_private;
@@ -142,15 +182,14 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 
         r = 0;
         id = args->in.ctx_id;
-        flags = args->in.flags;
 
         switch (args->in.op) {
                 case AMDGPU_CTX_OP_ALLOC_CTX:
-                        r = amdgpu_ctx_alloc(adev, fpriv, &id, flags);
+                        r = amdgpu_ctx_alloc(adev, fpriv, &id);
                         args->out.alloc.ctx_id = id;
                         break;
                 case AMDGPU_CTX_OP_FREE_CTX:
-                        r = amdgpu_ctx_free(adev, fpriv, id);
+                        r = amdgpu_ctx_free(fpriv, id);
                         break;
                 case AMDGPU_CTX_OP_QUERY_STATE:
                         r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
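Note: this switch is reached through the DRM_IOCTL_AMDGPU_CTX ioctl. A rough userspace sketch of allocating and freeing a context through it follows; it assumes libdrm's amdgpu_drm.h uapi header, an example render node path, and linking with -ldrm, and is illustrative rather than code taken from this patch.

/* Build with: cc ctx_demo.c $(pkg-config --cflags --libs libdrm)
 * Header and device paths depend on the local libdrm install. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

int main(void)
{
        union drm_amdgpu_ctx args;
        uint32_t ctx_id;
        int fd;

        fd = open("/dev/dri/renderD128", O_RDWR);       /* example render node */
        if (fd < 0)
                return 1;

        /* AMDGPU_CTX_OP_ALLOC_CTX: the kernel returns the handle in out.alloc. */
        memset(&args, 0, sizeof(args));
        args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
        if (drmIoctl(fd, DRM_IOCTL_AMDGPU_CTX, &args) == 0) {
                ctx_id = args.out.alloc.ctx_id;
                printf("allocated context %u\n", ctx_id);  /* never 0: idr_alloc() starts at 1 */

                /* AMDGPU_CTX_OP_FREE_CTX drops the handle again. */
                memset(&args, 0, sizeof(args));
                args.in.op = AMDGPU_CTX_OP_FREE_CTX;
                args.in.ctx_id = ctx_id;
                drmIoctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
        }

        close(fd);
        return 0;
}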
@@ -165,7 +204,12 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 {
         struct amdgpu_ctx *ctx;
-        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
+        struct amdgpu_ctx_mgr *mgr;
+
+        if (!fpriv)
+                return NULL;
+
+        mgr = &fpriv->ctx_mgr;
 
         mutex_lock(&mgr->lock);
         ctx = idr_find(&mgr->ctx_handles, id);
@@ -177,17 +221,96 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 {
-        struct amdgpu_fpriv *fpriv;
-        struct amdgpu_ctx_mgr *mgr;
-
         if (ctx == NULL)
                 return -EINVAL;
 
-        fpriv = ctx->fpriv;
-        mgr = &fpriv->ctx_mgr;
-        mutex_lock(&mgr->lock);
         kref_put(&ctx->refcount, amdgpu_ctx_do_release);
-        mutex_unlock(&mgr->lock);
-
         return 0;
 }
+
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+                              struct fence *fence, uint64_t queued_seq)
+{
+        struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+        uint64_t seq = 0;
+        unsigned idx = 0;
+        struct fence *other = NULL;
+
+        if (amdgpu_enable_scheduler)
+                seq = queued_seq;
+        else
+                seq = cring->sequence;
+        idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+        other = cring->fences[idx];
+        if (other) {
+                signed long r;
+                r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+                if (r < 0)
+                        DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+        }
+
+        fence_get(fence);
+
+        spin_lock(&ctx->ring_lock);
+        cring->fences[idx] = fence;
+        if (!amdgpu_enable_scheduler)
+                cring->sequence++;
+        spin_unlock(&ctx->ring_lock);
+
+        fence_put(other);
+
+        return seq;
+}
+
+struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+                                   struct amdgpu_ring *ring, uint64_t seq)
+{
+        struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+        struct fence *fence;
+        uint64_t queued_seq;
+
+        spin_lock(&ctx->ring_lock);
+        if (amdgpu_enable_scheduler)
+                queued_seq = amd_sched_next_queued_seq(&cring->entity);
+        else
+                queued_seq = cring->sequence;
+
+        if (seq >= queued_seq) {
+                spin_unlock(&ctx->ring_lock);
+                return ERR_PTR(-EINVAL);
+        }
+
+
+        if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) {
+                spin_unlock(&ctx->ring_lock);
+                return NULL;
+        }
+
+        fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+        spin_unlock(&ctx->ring_lock);
+
+        return fence;
+}
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+{
+        mutex_init(&mgr->lock);
+        idr_init(&mgr->ctx_handles);
+}
+
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
+{
+        struct amdgpu_ctx *ctx;
+        struct idr *idp;
+        uint32_t id;
+
+        idp = &mgr->ctx_handles;
+
+        idr_for_each_entry(idp, ctx, id) {
+                if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+                        DRM_ERROR("ctx %p is still alive\n", ctx);
+        }
+
+        idr_destroy(&mgr->ctx_handles);
+        mutex_destroy(&mgr->lock);
+}
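Note: the two fence helpers added above implement a fixed-size window of AMDGPU_CTX_MAX_CS_PENDING fences per ring. amdgpu_ctx_add_fence() stores each fence at seq % AMDGPU_CTX_MAX_CS_PENDING, waiting out whatever previously occupied that slot, and amdgpu_ctx_get_fence() distinguishes three cases: a sequence that was never emitted (-EINVAL), one so old that its slot has been recycled and the fence is therefore known to have signalled (NULL), and one still inside the window (return the stored fence). The self-contained model below mirrors that logic with plain integers; MAX_PENDING, ring_window and the lookup enum are illustrative names, not the driver's types.

#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING 16  /* stand-in for AMDGPU_CTX_MAX_CS_PENDING */

/* One window per ring: a fixed array of slots indexed by sequence number. */
struct ring_window {
        uint64_t fences[MAX_PENDING];   /* fence modelled as its own seq, 0 = empty */
        uint64_t next_seq;              /* next sequence number to be handed out */
};

enum lookup { LOOKUP_INVALID, LOOKUP_SIGNALLED, LOOKUP_PENDING };

/* Store the fence for "seq", recycling the slot that held seq - MAX_PENDING
 * (which amdgpu_ctx_add_fence() waits on before overwriting). */
static void add_fence(struct ring_window *w, uint64_t seq)
{
        w->fences[seq % MAX_PENDING] = seq;
        w->next_seq = seq + 1;
}

/* The three cases of amdgpu_ctx_get_fence(). */
static enum lookup get_fence(const struct ring_window *w, uint64_t seq,
                             uint64_t *fence)
{
        if (seq >= w->next_seq)
                return LOOKUP_INVALID;          /* ERR_PTR(-EINVAL): never emitted */
        if (seq + MAX_PENDING < w->next_seq)
                return LOOKUP_SIGNALLED;        /* NULL: slot recycled, already signalled */
        *fence = w->fences[seq % MAX_PENDING];
        return LOOKUP_PENDING;                  /* fence_get() and return the fence */
}

int main(void)
{
        struct ring_window w = { .next_seq = 1 };
        uint64_t fence;

        for (uint64_t s = 1; s <= 40; s++)
                add_fence(&w, s);

        printf("seq 40: %d (pending, in window)\n", get_fence(&w, 40, &fence));
        printf("seq  5: %d (older than the window, signalled)\n", get_fence(&w, 5, &fence));
        printf("seq 99: %d (never emitted, invalid)\n", get_fence(&w, 99, &fence));
        return 0;
}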