author     Christian König <christian.koenig@amd.com>   2018-07-13 03:12:44 -0400
committer  Alex Deucher <alexander.deucher@amd.com>     2018-08-27 12:10:45 -0400
commit     845e6fdf3b52ae8d8cde8ddafa6bbd60214f2bd2 (patch)
tree       e7c3f77d7fedeaf0611dbab123aacb1e0ae46072 /drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
parent     3798e9a6e6390b873a745d6240ac9646bd2bf514 (diff)
drm/amdgpu: use scheduler load balancing for SDMA CS
Start to use the scheduler load balancing for userspace SDMA
command submissions.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
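
The change depends on drm_sched_entity_init() taking a list of run queues rather than a single one. As a point of reference, the signature in this era of the DRM scheduler was approximately (parameter names from memory, hedged accordingly):

	int drm_sched_entity_init(struct drm_sched_entity *entity,
				  struct drm_sched_rq **rq_list,
				  unsigned int num_rq_list,
				  atomic_t *guilty);

Handing over every SDMA run queue of the requested priority lets the scheduler place the entity on whichever SDMA engine carries the least load, rather than the single ring the context was previously bound to.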
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')

 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 02d563cfb4a7..3ff8042b8f89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -48,7 +48,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			   struct drm_file *filp,
 			   struct amdgpu_ctx *ctx)
 {
-	unsigned i, j;
+	struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS];
+	unsigned i, j, num_sdma_rqs;
 	int r;
 
 	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
@@ -80,18 +81,34 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	ctx->init_priority = priority;
 	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
 
-	/* create context entity for each ring */
+	num_sdma_rqs = 0;
 	for (i = 0; i < adev->num_rings; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		struct drm_sched_rq *rq;
 
 		rq = &ring->sched.sched_rq[priority];
+		if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA)
+			sdma_rqs[num_sdma_rqs++] = rq;
+	}
+
+	/* create context entity for each ring */
+	for (i = 0; i < adev->num_rings; i++) {
+		struct amdgpu_ring *ring = adev->rings[i];
 
 		if (ring == &adev->gfx.kiq.ring)
 			continue;
 
-		r = drm_sched_entity_init(&ctx->rings[i].entity,
-					  &rq, 1, &ctx->guilty);
+		if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
+			r = drm_sched_entity_init(&ctx->rings[i].entity,
+						  sdma_rqs, num_sdma_rqs,
+						  &ctx->guilty);
+		} else {
+			struct drm_sched_rq *rq;
+
+			rq = &ring->sched.sched_rq[priority];
+			r = drm_sched_entity_init(&ctx->rings[i].entity,
+						  &rq, 1, &ctx->guilty);
+		}
 		if (r)
 			goto failed;
 	}
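
For illustration, the balancing itself happens on the scheduler side when the entity's run queue is selected: among the run queues passed at init time, the one whose scheduler currently has the fewest queued jobs wins. A minimal sketch of that selection, assuming the per-scheduler num_jobs counter this series introduced (hypothetical helper name, not the exact in-tree code):

	/* Hypothetical sketch: return the least-loaded rq from the list the
	 * entity was initialized with, judged by each scheduler's job count.
	 */
	static struct drm_sched_rq *
	pick_least_loaded_rq(struct drm_sched_rq **rq_list, unsigned int num_rq_list)
	{
		struct drm_sched_rq *best = rq_list[0];
		unsigned int i;

		for (i = 1; i < num_rq_list; i++) {
			if (atomic_read(&rq_list[i]->sched->num_jobs) <
			    atomic_read(&best->sched->num_jobs))
				best = rq_list[i];
		}
		return best;
	}

With only one run queue in the list, as for the non-SDMA rings above, this selection degenerates to the old fixed ring-to-entity mapping.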