aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
diff options
context:
space:
mode:
authorAndres Rodriguez <andresx7@gmail.com>2017-03-06 16:27:55 -0500
committerAlex Deucher <alexander.deucher@amd.com>2017-05-31 16:49:02 -0400
commit795f2813e628bcf57a69f2dfe413360d14a1d7f4 (patch)
treef60bfe602590fde4bd170c263a569cd8147ffdd0 /drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
parenteffd924d2f3b9c52d5bd8137c3803e83f719a290 (diff)
drm/amdgpu: implement lru amdgpu_queue_mgr policy for compute v4
Use an LRU policy to map usermode rings to HW compute queues. Most compute clients use one queue, and usually the first queue available. This results in poor pipe/queue work distribution when multiple compute apps are running. In most cases pipe 0 queue 0 is the only queue that gets used.

In order to better distribute work across multiple HW queues, we adopt a policy to map the usermode ring ids to the LRU HW queue. This fixes the problem of a large majority of multi-app compute workloads sharing the same HW queue, even though 7 other queues are available.

v2: use ring->funcs->type instead of ring->hw_ip
v3: remove amdgpu_queue_mapper_funcs
v4: change ring_lru_list_lock to spinlock, grab only once in lru_get()

Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c38
1 files changed, 37 insertions, 1 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index c13a55352db6..4073f072f6c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -100,6 +100,40 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
100 return amdgpu_update_cached_map(mapper, ring, *out_ring); 100 return amdgpu_update_cached_map(mapper, ring, *out_ring);
101} 101}
102 102
103static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
104{
105 switch (hw_ip) {
106 case AMDGPU_HW_IP_GFX:
107 return AMDGPU_RING_TYPE_GFX;
108 case AMDGPU_HW_IP_COMPUTE:
109 return AMDGPU_RING_TYPE_COMPUTE;
110 case AMDGPU_HW_IP_DMA:
111 return AMDGPU_RING_TYPE_SDMA;
112 case AMDGPU_HW_IP_UVD:
113 return AMDGPU_RING_TYPE_UVD;
114 case AMDGPU_HW_IP_VCE:
115 return AMDGPU_RING_TYPE_VCE;
116 default:
117 DRM_ERROR("Invalid HW IP specified %d\n", hw_ip);
118 return -1;
119 }
120}
121
122static int amdgpu_lru_map(struct amdgpu_device *adev,
123 struct amdgpu_queue_mapper *mapper,
124 int user_ring,
125 struct amdgpu_ring **out_ring)
126{
127 int r;
128 int ring_type = amdgpu_hw_ip_to_ring_type(mapper->hw_ip);
129
130 r = amdgpu_ring_lru_get(adev, ring_type, out_ring);
131 if (r)
132 return r;
133
134 return amdgpu_update_cached_map(mapper, user_ring, *out_ring);
135}
136
103/** 137/**
104 * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct 138 * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
105 * 139 *
@@ -230,7 +264,6 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
230 264
231 switch (mapper->hw_ip) { 265 switch (mapper->hw_ip) {
232 case AMDGPU_HW_IP_GFX: 266 case AMDGPU_HW_IP_GFX:
233 case AMDGPU_HW_IP_COMPUTE:
234 case AMDGPU_HW_IP_DMA: 267 case AMDGPU_HW_IP_DMA:
235 case AMDGPU_HW_IP_UVD: 268 case AMDGPU_HW_IP_UVD:
236 case AMDGPU_HW_IP_VCE: 269 case AMDGPU_HW_IP_VCE:
@@ -239,6 +272,9 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
239 case AMDGPU_HW_IP_VCN_ENC: 272 case AMDGPU_HW_IP_VCN_ENC:
240 r = amdgpu_identity_map(adev, mapper, ring, out_ring); 273 r = amdgpu_identity_map(adev, mapper, ring, out_ring);
241 break; 274 break;
275 case AMDGPU_HW_IP_COMPUTE:
276 r = amdgpu_lru_map(adev, mapper, ring, out_ring);
277 break;
242 default: 278 default:
243 *out_ring = NULL; 279 *out_ring = NULL;
244 r = -EINVAL; 280 r = -EINVAL;