Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 106
1 file changed, 96 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 75c933b1a432..c184468e2b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -23,13 +23,41 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_auth.h>
 #include "amdgpu.h"
+#include "amdgpu_sched.h"
 
-static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+				      enum amd_sched_priority priority)
+{
+	/* NORMAL and below are accessible by everyone */
+	if (priority <= AMD_SCHED_PRIORITY_NORMAL)
+		return 0;
+
+	if (capable(CAP_SYS_NICE))
+		return 0;
+
+	if (drm_is_current_master(filp))
+		return 0;
+
+	return -EACCES;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_device *adev,
+			   enum amd_sched_priority priority,
+			   struct drm_file *filp,
+			   struct amdgpu_ctx *ctx)
 {
 	unsigned i, j;
 	int r;
 
+	if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+		return -EINVAL;
+
+	r = amdgpu_ctx_priority_permit(filp, priority);
+	if (r)
+		return r;
+
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->adev = adev;
 	kref_init(&ctx->refcount);
@@ -39,19 +67,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 	if (!ctx->fences)
 		return -ENOMEM;
 
+	mutex_init(&ctx->lock);
+
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		ctx->rings[i].sequence = 1;
 		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
 	}
 
 	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+	ctx->init_priority = priority;
+	ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
 
 	/* create context entity for each ring */
 	for (i = 0; i < adev->num_rings; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		struct amd_sched_rq *rq;
 
-		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+		rq = &ring->sched.sched_rq[priority];
 
 		if (ring == &adev->gfx.kiq.ring)
 			continue;
@@ -96,10 +129,14 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 			&ctx->rings[i].entity);
 
 	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
+
+	mutex_destroy(&ctx->lock);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 			    struct amdgpu_fpriv *fpriv,
+			    struct drm_file *filp,
+			    enum amd_sched_priority priority,
 			    uint32_t *id)
 {
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -117,8 +154,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 		kfree(ctx);
 		return r;
 	}
+
 	*id = (uint32_t)r;
-	r = amdgpu_ctx_init(adev, ctx);
+	r = amdgpu_ctx_init(adev, priority, filp, ctx);
 	if (r) {
 		idr_remove(&mgr->ctx_handles, *id);
 		*id = 0;
@@ -193,6 +231,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 {
 	int r;
 	uint32_t id;
+	enum amd_sched_priority priority;
 
 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -200,10 +239,16 @@
 
 	r = 0;
 	id = args->in.ctx_id;
+	priority = amdgpu_to_sched_priority(args->in.priority);
+
+	/* For backwards compatibility reasons, we need to accept
+	 * ioctls with garbage in the priority field */
+	if (priority == AMD_SCHED_PRIORITY_INVALID)
+		priority = AMD_SCHED_PRIORITY_NORMAL;
 
 	switch (args->in.op) {
 	case AMDGPU_CTX_OP_ALLOC_CTX:
-		r = amdgpu_ctx_alloc(adev, fpriv, &id);
+		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
 		args->out.alloc.ctx_id = id;
 		break;
 	case AMDGPU_CTX_OP_FREE_CTX:
@@ -256,12 +301,8 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 
 	idx = seq & (amdgpu_sched_jobs - 1);
 	other = cring->fences[idx];
-	if (other) {
-		signed long r;
-		r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
-		if (r < 0)
-			return r;
-	}
+	if (other)
+		BUG_ON(!dma_fence_is_signaled(other));
 
 	dma_fence_get(fence);
 
@@ -305,6 +346,51 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	return fence;
 }
 
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+				  enum amd_sched_priority priority)
+{
+	int i;
+	struct amdgpu_device *adev = ctx->adev;
+	struct amd_sched_rq *rq;
+	struct amd_sched_entity *entity;
+	struct amdgpu_ring *ring;
+	enum amd_sched_priority ctx_prio;
+
+	ctx->override_priority = priority;
+
+	ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+			ctx->init_priority : ctx->override_priority;
+
+	for (i = 0; i < adev->num_rings; i++) {
+		ring = adev->rings[i];
+		entity = &ctx->rings[i].entity;
+		rq = &ring->sched.sched_rq[ctx_prio];
+
+		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+			continue;
+
+		amd_sched_entity_set_rq(entity, rq);
+	}
+}
+
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+{
+	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
+	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
+	struct dma_fence *other = cring->fences[idx];
+
+	if (other) {
+		signed long r;
+		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		if (r < 0) {
+			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+			return r;
+		}
+	}
+
+	return 0;
+}
+
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 {
 	mutex_init(&mgr->lock);
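
From userspace, the new plumbing is reached through the existing DRM_AMDGPU_CTX ioctl: the caller fills in args->in.priority and amdgpu_ctx_ioctl() maps it with amdgpu_to_sched_priority() as shown in the hunk above. A minimal sketch of a prioritized context allocation follows; it assumes the AMDGPU_CTX_PRIORITY_* values from the matching amdgpu_drm.h uapi update and the stock libdrm drmCommandWriteRead() helper, and the alloc_ctx_with_priority() wrapper is purely illustrative, not part of this patch.

/* Illustrative only: allocate a GPU context at a given priority via the
 * DRM_AMDGPU_CTX ioctl handled by amdgpu_ctx_ioctl() above. Priorities
 * above NORMAL require CAP_SYS_NICE or DRM master status, otherwise
 * amdgpu_ctx_priority_permit() makes the ioctl fail with -EACCES. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>		/* include path depends on the libdrm install */

static int alloc_ctx_with_priority(int fd, int32_t priority, uint32_t *ctx_id)
{
	union drm_amdgpu_ctx args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = priority;	/* e.g. AMDGPU_CTX_PRIORITY_HIGH */

	r = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (r)
		return r;

	*ctx_id = args.out.alloc.ctx_id;
	return 0;
}

Note that an unrecognized priority value is not rejected: amdgpu_to_sched_priority() reports AMD_SCHED_PRIORITY_INVALID and the ioctl silently falls back to NORMAL, preserving backwards compatibility with older userspace that leaves the field uninitialized.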