diff options
author | Christian König <christian.koenig@amd.com> | 2018-07-16 08:59:26 -0400 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2018-08-27 12:10:46 -0400 |
commit | 72a4c072ca9f2640ea303c399bd3224b69a543d9 (patch) | |
tree | 966f7f7338fdffde7326da2a54b1faa86a5e242b /drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |
parent | 845e6fdf3b52ae8d8cde8ddafa6bbd60214f2bd2 (diff) |
drm/amdgpu: use scheduler load balancing for compute CS
Start to use the scheduler load balancing for userspace compute
command submissions.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 10 |
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 3ff8042b8f89..a078e68e0319 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | |||
@@ -49,7 +49,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, | |||
49 | struct amdgpu_ctx *ctx) | 49 | struct amdgpu_ctx *ctx) |
50 | { | 50 | { |
51 | struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS]; | 51 | struct drm_sched_rq *sdma_rqs[AMDGPU_MAX_RINGS]; |
52 | unsigned i, j, num_sdma_rqs; | 52 | struct drm_sched_rq *comp_rqs[AMDGPU_MAX_RINGS]; |
53 | unsigned i, j, num_sdma_rqs, num_comp_rqs; | ||
53 | int r; | 54 | int r; |
54 | 55 | ||
55 | if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) | 56 | if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) |
@@ -82,6 +83,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, | |||
82 | ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; | 83 | ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; |
83 | 84 | ||
84 | num_sdma_rqs = 0; | 85 | num_sdma_rqs = 0; |
86 | num_comp_rqs = 0; | ||
85 | for (i = 0; i < adev->num_rings; i++) { | 87 | for (i = 0; i < adev->num_rings; i++) { |
86 | struct amdgpu_ring *ring = adev->rings[i]; | 88 | struct amdgpu_ring *ring = adev->rings[i]; |
87 | struct drm_sched_rq *rq; | 89 | struct drm_sched_rq *rq; |
@@ -89,6 +91,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, | |||
89 | rq = &ring->sched.sched_rq[priority]; | 91 | rq = &ring->sched.sched_rq[priority]; |
90 | if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) | 92 | if (ring->funcs->type == AMDGPU_RING_TYPE_SDMA) |
91 | sdma_rqs[num_sdma_rqs++] = rq; | 93 | sdma_rqs[num_sdma_rqs++] = rq; |
94 | else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) | ||
95 | comp_rqs[num_comp_rqs++] = rq; | ||
92 | } | 96 | } |
93 | 97 | ||
94 | /* create context entity for each ring */ | 98 | /* create context entity for each ring */ |
@@ -102,6 +106,10 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, | |||
102 | r = drm_sched_entity_init(&ctx->rings[i].entity, | 106 | r = drm_sched_entity_init(&ctx->rings[i].entity, |
103 | sdma_rqs, num_sdma_rqs, | 107 | sdma_rqs, num_sdma_rqs, |
104 | &ctx->guilty); | 108 | &ctx->guilty); |
109 | } else if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { | ||
110 | r = drm_sched_entity_init(&ctx->rings[i].entity, | ||
111 | comp_rqs, num_comp_rqs, | ||
112 | &ctx->guilty); | ||
105 | } else { | 113 | } else { |
106 | struct drm_sched_rq *rq; | 114 | struct drm_sched_rq *rq; |
107 | 115 | ||