author		Andres Rodriguez <andresx7@gmail.com>	2017-02-16 00:47:32 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2017-05-31 16:49:01 -0400
commit		effd924d2f3b9c52d5bd8137c3803e83f719a290 (patch)
tree		2705aaf11786cd79e5f17f4283cc7ff511d9f616 /drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
parent		ecd910eb1f091dd25f4a737a3bc50c0c8892eac7 (diff)
drm/amdgpu: untie user ring ids from kernel ring ids v6
Add amdgpu_queue_mgr, a mechanism that decouples usermode's ring ids
from the kernel's ring ids.

The queue manager maintains a per-file descriptor map of user ring ids
to amdgpu_ring pointers. Once a map is created it is permanent (this is
required to maintain FIFO execution guarantees for a context's ring).

Different queue map policies can be configured for each HW IP.
Currently all HW IPs use the identity mapper, i.e. the kernel ring id
is equal to the user ring id. The purpose of this mechanism is to
distribute the load across multiple queues more effectively for HW IPs
that support multiple rings.

Userspace clients are unable to check whether a specific resource is
in use by a different client. Therefore, it is up to the kernel driver
to make the optimal choice.

v2: remove amdgpu_queue_mapper_funcs
v3: made amdgpu_queue_mgr per context instead of per-fd
v4: add context_put on error paths
v5: rebase and include new IPs UVD_ENC & VCN_*
v6: drop unused amdgpu_ring_is_valid_index (Alex)

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c	251
1 file changed, 251 insertions(+), 0 deletions(-)
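To illustrate the intended use, here is how a consumer such as the command
submission path might resolve a user-visible ring id through the manager
instead of indexing kernel rings directly. This is a minimal sketch, not
part of the patch; the per-context embedding ctx->queue_mgr and the
surrounding variables are assumptions (per v3, the manager lives in each
context):

	/* Hypothetical caller: ctx, user_hw_ip and user_ring are assumed
	 * to come from the surrounding submission code. */
	struct amdgpu_ring *ring;
	int r;

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				 user_hw_ip, 0 /* instance */,
				 user_ring, &ring);
	if (r)
		return r;	/* userspace passed an invalid ip/ring id */

	/* Later submissions with the same user_ring resolve to the same
	 * kernel ring, preserving the context's FIFO ordering. */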
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
new file mode 100644
index 000000000000..c13a55352db6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -0,0 +1,251 @@
/*
 * Copyright 2017 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andres Rodriguez
 */

#include "amdgpu.h"
#include "amdgpu_ring.h"

static int amdgpu_queue_mapper_init(struct amdgpu_queue_mapper *mapper,
				    int hw_ip)
{
	if (!mapper)
		return -EINVAL;

	if (hw_ip >= AMDGPU_MAX_IP_NUM)
		return -EINVAL;

	mapper->hw_ip = hw_ip;
	mutex_init(&mapper->lock);

	memset(mapper->queue_map, 0, sizeof(mapper->queue_map));

	return 0;
}

static struct amdgpu_ring *amdgpu_get_cached_map(struct amdgpu_queue_mapper *mapper,
						 int ring)
{
	return mapper->queue_map[ring];
}

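/*
 * A mapping is written at most once: remapping a user ring id would break
 * the FIFO execution guarantee for the context's ring, hence the WARN_ON
 * below.
 */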
static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
				    int ring, struct amdgpu_ring *pring)
{
	if (WARN_ON(mapper->queue_map[ring])) {
		DRM_ERROR("Unexpected ring re-map\n");
		return -EINVAL;
	}

	mapper->queue_map[ring] = pring;

	return 0;
}

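/*
 * Identity policy: user ring id i maps to kernel ring id i for the mapper's
 * HW IP. All HW IPs currently use this policy; per-IP load-balancing
 * policies can be plugged in here later.
 */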
static int amdgpu_identity_map(struct amdgpu_device *adev,
			       struct amdgpu_queue_mapper *mapper,
			       int ring,
			       struct amdgpu_ring **out_ring)
{
	switch (mapper->hw_ip) {
	case AMDGPU_HW_IP_GFX:
		*out_ring = &adev->gfx.gfx_ring[ring];
		break;
	case AMDGPU_HW_IP_COMPUTE:
		*out_ring = &adev->gfx.compute_ring[ring];
		break;
	case AMDGPU_HW_IP_DMA:
		*out_ring = &adev->sdma.instance[ring].ring;
		break;
	case AMDGPU_HW_IP_UVD:
		*out_ring = &adev->uvd.ring;
		break;
	case AMDGPU_HW_IP_VCE:
		*out_ring = &adev->vce.ring[ring];
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		*out_ring = &adev->uvd.ring_enc[ring];
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		*out_ring = &adev->vcn.ring_dec;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		*out_ring = &adev->vcn.ring_enc[ring];
		break;
	default:
		*out_ring = NULL;
		DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
		return -EINVAL;
	}

	return amdgpu_update_cached_map(mapper, ring, *out_ring);
}

/**
 * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 *
 * Initialize the selected @mgr (all asics).
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr)
{
	int i, r;

	if (!adev || !mgr)
		return -EINVAL;

	memset(mgr, 0, sizeof(*mgr));

	for (i = 0; i < AMDGPU_MAX_IP_NUM; ++i) {
		r = amdgpu_queue_mapper_init(&mgr->mapper[i], i);
		if (r)
			return r;
	}

	return 0;
}

/**
 * amdgpu_queue_mgr_fini - de-initialize an amdgpu_queue_mgr struct
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 *
 * De-initialize the selected @mgr (all asics).
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr)
{
	return 0;
}

/**
 * amdgpu_queue_mgr_map - Map a userspace ring id to an amdgpu_ring
 *
 * @adev: amdgpu_device pointer
 * @mgr: amdgpu_queue_mgr structure holding queue information
 * @hw_ip: HW IP enum
 * @instance: HW instance
 * @ring: user ring id
 * @out_ring: pointer to mapped amdgpu_ring
 *
 * Map a userspace ring id to an appropriate kernel ring. Different
 * policies are configurable at a HW IP level.
 *
 * Returns 0 on success, error on failure.
 */
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
			 struct amdgpu_queue_mgr *mgr,
			 int hw_ip, int instance, int ring,
			 struct amdgpu_ring **out_ring)
{
	struct amdgpu_queue_mapper *mapper;
	int r, ip_num_rings;

	if (!adev || !mgr || !out_ring)
		return -EINVAL;

	if (hw_ip >= AMDGPU_MAX_IP_NUM)
		return -EINVAL;

	if (ring >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	/* Index the mapper only after hw_ip has been validated. */
	mapper = &mgr->mapper[hw_ip];

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_ERROR("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
		ip_num_rings = adev->gfx.num_gfx_rings;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		ip_num_rings = adev->gfx.num_compute_rings;
		break;
	case AMDGPU_HW_IP_DMA:
		ip_num_rings = adev->sdma.num_instances;
		break;
	case AMDGPU_HW_IP_UVD:
		ip_num_rings = 1;
		break;
	case AMDGPU_HW_IP_VCE:
		ip_num_rings = adev->vce.num_rings;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		ip_num_rings = adev->uvd.num_enc_rings;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		ip_num_rings = 1;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		ip_num_rings = adev->vcn.num_enc_rings;
		break;
	default:
		DRM_ERROR("unknown ip type: %d\n", hw_ip);
		return -EINVAL;
	}

	if (ring >= ip_num_rings) {
		DRM_ERROR("Ring index:%d exceeds maximum:%d for ip:%d\n",
			  ring, ip_num_rings, hw_ip);
		return -EINVAL;
	}

	mutex_lock(&mapper->lock);

	*out_ring = amdgpu_get_cached_map(mapper, ring);
	if (*out_ring) {
		/* cache hit */
		r = 0;
		goto out_unlock;
	}

	switch (mapper->hw_ip) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_COMPUTE:
	case AMDGPU_HW_IP_DMA:
	case AMDGPU_HW_IP_UVD:
	case AMDGPU_HW_IP_VCE:
	case AMDGPU_HW_IP_UVD_ENC:
	case AMDGPU_HW_IP_VCN_DEC:
	case AMDGPU_HW_IP_VCN_ENC:
		r = amdgpu_identity_map(adev, mapper, ring, out_ring);
		break;
	default:
		*out_ring = NULL;
		r = -EINVAL;
		DRM_ERROR("unknown HW IP type: %d\n", mapper->hw_ip);
	}

out_unlock:
	mutex_unlock(&mapper->lock);
	return r;
}
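For completeness, a minimal lifecycle sketch: per v3 above the manager is
meant to be embedded in each context, so a context's setup and teardown
paths would bracket it with the two calls below. The struct amdgpu_ctx
embedding and the example_* function names are illustrative assumptions,
not part of this file:

	/* Sketch only: assumes struct amdgpu_ctx gains a queue_mgr member
	 * elsewhere in the series. */
	static int example_ctx_init(struct amdgpu_device *adev,
				    struct amdgpu_ctx *ctx)
	{
		/* Sets up identity mappers for every HW IP. */
		return amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	}

	static void example_ctx_fini(struct amdgpu_device *adev,
				     struct amdgpu_ctx *ctx)
	{
		/* Currently a no-op, but keeps teardown symmetric. */
		amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
	}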